<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _longest_val_in_column(self, col): """ get size of longest value in specific column :param col: str, column name :return int """
try:
    # +2 is for implicit separator
    return max([len(x[col]) for x in self.table if x[col]]) + 2
except KeyError:
    logger.error("there is no column %r", col)
    raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _init(self): """ initialize all values based on provided input :return: None """
self.col_count = len(self.col_list)
# list of lengths of longest entries in columns
self.col_longest = self.get_all_longest_col_lengths()
self.data_length = sum(self.col_longest.values())

if self.terminal_width > 0:
    # free space is space which should be equally distributed for all columns
    #   self.terminal_width  -- terminal is our canvas
    #   - self.data_length   -- subtract length of content (the actual data)
    #   - self.col_count + 1 -- table lines are not part of free space, their
    #                           width is (number of columns - 1)
    self.total_free_space = (self.terminal_width - self.data_length) - self.col_count + 1
    if self.total_free_space <= 0:
        self.total_free_space = None
    else:
        self.default_column_space = self.total_free_space // self.col_count
        self.default_column_space_remainder = self.total_free_space % self.col_count
        logger.debug("total free space: %d, column space: %d, remainder: %d, columns: %d",
                     self.total_free_space, self.default_column_space,
                     self.default_column_space_remainder, self.col_count)
else:
    self.total_free_space = None
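A standalone walk-through of the free-space arithmetic above, with illustrative numbers (an 80-column terminal, three columns whose longest values total 30 characters):

# Illustrative numbers only; not part of the original class.
terminal_width, col_count, data_length = 80, 3, 30

total_free_space = (terminal_width - data_length) - col_count + 1
default_column_space = total_free_space // col_count
default_column_space_remainder = total_free_space % col_count
print(total_free_space, default_column_space, default_column_space_remainder)
# 48 16 0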
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _count_sizes(self): """ count all values needed to display whole table <> HEADER | HEADER2 | HEADER3 <> kudos to PostgreSQL developers :return: None """
format_list = []
header_sepa_format_list = []
# actual widths of columns
self.col_widths = {}
for col in self.col_list:
    col_length = self.col_longest[col]
    col_width = col_length + self._separate()
    # -2 is for implicit separator -- spaces around
    format_list.append(" {%s:%d} " % (col, col_width - 2))
    header_sepa_format_list.append("{%s:%d}" % (col, col_width))
    self.col_widths[col] = col_width
logger.debug("column widths %s", self.col_widths)
self.format_str = "|".join(format_list)
self.header_format_str = "+".join(header_sepa_format_list)
self.header_data = {}
for k in self.col_widths:
    self.header_data[k] = "-" * self.col_widths[k]
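To make the format strings concrete, here is a hand-written equivalent for two columns of width 8 (illustrative only; the real strings are built per column above):

# Hand-written equivalents of format_str and header_format_str for two
# columns of width 8 each, assuming no extra free space.
format_str = " {name:6} | {id:6} "
header_format_str = "{name:8}+{id:8}"
print(format_str.format(name='NAME', id='ID'))
print(header_format_str.format(name='-' * 8, id='-' * 8))
print(format_str.format(name='foo', id='42'))
# The output resembles psql:
#  NAME   | ID
# --------+--------
#  foo    | 42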
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_longest_col_lengths(self): """ iterate over all columns and get their longest values :return: dict, {"column_name": 132} """
response = {}
for col in self.col_list:
    response[col] = self._longest_val_in_column(col)
return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _separate(self): """ get a width of separator for current column :return: int """
if self.total_free_space is None:
    return 0
else:
    sepa = self.default_column_space
    # we need to distribute remainders
    if self.default_column_space_remainder > 0:
        sepa += 1
        self.default_column_space_remainder -= 1
    logger.debug("remainder: %d, separator: %d",
                 self.default_column_space_remainder, sepa)
    return sepa
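A standalone sketch of the remainder distribution, with illustrative numbers: 10 spare columns across 4 separators come out as 3, 3, 2, 2.

# Mimics how successive _separate() calls spread the remainder.
free_space, col_count = 10, 4
space, remainder = divmod(free_space, col_count)
widths = []
for _ in range(col_count):
    sepa = space
    if remainder > 0:
        sepa += 1
        remainder -= 1
    widths.append(sepa)
print(widths)  # [3, 3, 2, 2]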
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render(self): """ print provided table :return: None """
print(self.format_str.format(**self.header), file=sys.stderr)
print(self.header_format_str.format(**self.header_data), file=sys.stderr)
for row in self.data:
    print(self.format_str.format(**row))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_params(self, **kwargs): """ set parameters in the user parameters these parameters are accepted: :param git_uri: str, uri of the git repository for the source :param git_ref: str, commit ID of the branch to be pulled :param git_branch: str, branch name of the branch to be pulled :param base_image: str, name of the parent image :param name_label: str, label of the parent image :param user: str, name of the user requesting the build :param component: str, name of the component :param release: str, :param build_image: str, :param build_imagestream: str, :param build_from: str, :param build_type: str, orchestrator or worker :param platforms: list of str, platforms to build on :param platform: str, platform :param koji_target: str, koji tag with packages used to build the image :param koji_task_id: str, koji ID :param koji_parent_build: str, :param koji_upload_dir: str, koji directory where the completed image will be uploaded :param flatpak: if we should build a Flatpak OCI Image :param flatpak_base_image: str, name of the Flatpak OCI Image :param reactor_config_map: str, name of the config map containing the reactor environment :param reactor_config_override: dict, data structure for reactor config to be injected as an environment variable into a worker build; when used, reactor_config_map is ignored. :param yum_repourls: list of str, uris of the yum repos to pull from :param signing_intent: str, signing intent of the ODCS composes :param compose_ids: list of int, ODCS composes to use instead of generating new ones :param filesystem_koji_task_id: int, Koji Task that created the base filesystem :param platform_node_selector: dict, a nodeselector for a specific platform :param scratch_build_node_selector: dict, a nodeselector for scratch builds :param explicit_build_node_selector: dict, a nodeselector for explicit builds :param auto_build_node_selector: dict, a nodeselector for auto builds :param isolated_build_node_selector: dict, a nodeselector for isolated builds :param operator_manifests_extract_platform: str, indicates which platform should upload operator manifests to koji :param parent_images_digests: dict, mapping image digests to names and platforms """
# Here we cater to the koji "scratch" build type, this will disable
# all plugins that might cause importing of data to koji
self.scratch = kwargs.get('scratch')
# When true, it indicates build was automatically started by
# OpenShift via a trigger, for instance ImageChangeTrigger
self.is_auto = kwargs.pop('is_auto', False)
# An isolated build is meant to patch a certain release and not
# update transient tags in container registry
self.isolated = kwargs.get('isolated')

self.osbs_api = kwargs.pop('osbs_api', None)
self.validate_build_variation()
self.base_image = kwargs.get('base_image')
self.platform_node_selector = kwargs.get('platform_node_selector', {})
self.scratch_build_node_selector = kwargs.get('scratch_build_node_selector', {})
self.explicit_build_node_selector = kwargs.get('explicit_build_node_selector', {})
self.auto_build_node_selector = kwargs.get('auto_build_node_selector', {})
self.isolated_build_node_selector = kwargs.get('isolated_build_node_selector', {})

logger.debug("now setting params '%s' for user_params", kwargs)
self.user_params.set_params(**kwargs)
self.source_registry = None
self.organization = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_data_from_reactor_config(self): """ Sets data from reactor config """
reactor_config_override = self.user_params.reactor_config_override.value
reactor_config_map = self.user_params.reactor_config_map.value
data = None

if reactor_config_override:
    data = reactor_config_override
elif reactor_config_map:
    config_map = self.osbs_api.get_config_map(reactor_config_map)
    data = config_map.get_data_by_key('config.yaml')

if not data:
    if self.user_params.flatpak.value:
        raise OsbsValidationException("flatpak_base_image must be provided")
    else:
        return

source_registry_key = 'source_registry'
registry_organization_key = 'registries_organization'
req_secrets_key = 'required_secrets'
token_secrets_key = 'worker_token_secrets'
flatpak_key = 'flatpak'
flatpak_base_image_key = 'base_image'

if source_registry_key in data:
    self.source_registry = data[source_registry_key]
if registry_organization_key in data:
    self.organization = data[registry_organization_key]

if self.user_params.flatpak.value:
    flatpak_base_image = data.get(flatpak_key, {}).get(flatpak_base_image_key, None)
    if flatpak_base_image:
        self.base_image = flatpak_base_image
        self.user_params.base_image.value = flatpak_base_image
    else:
        raise OsbsValidationException("flatpak_base_image must be provided")

required_secrets = data.get(req_secrets_key, [])
token_secrets = data.get(token_secrets_key, [])
self._set_required_secrets(required_secrets, token_secrets)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_required_secrets(self, required_secrets, token_secrets): """ Sets required secrets """
if self.user_params.build_type.value == BUILD_TYPE_ORCHESTRATOR:
    required_secrets += token_secrets

if not required_secrets:
    return

secrets = self.template['spec']['strategy']['customStrategy'].setdefault('secrets', [])
existing = set(secret_mount['secretSource']['name'] for secret_mount in secrets)
required_secrets = set(required_secrets)

already_set = required_secrets.intersection(existing)
if already_set:
    logger.debug("secrets %s are already set", already_set)

for secret in required_secrets - existing:
    secret_path = os.path.join(SECRETS_PATH, secret)
    logger.info("Configuring %s secret at %s", secret, secret_path)
    secrets.append({
        'secretSource': {
            'name': secret,
        },
        'mountPath': secret_path,
    })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def adjust_for_isolated(self): """ Remove certain plugins in order to handle the "isolated build" scenario. """
if self.user_params.isolated.value:
    remove_plugins = [
        ("prebuild_plugins", "check_and_set_rebuild"),
        ("prebuild_plugins", "stop_autorebuild_if_disabled")
    ]
    for when, which in remove_plugins:
        self.pt.remove_plugin(when, which, 'removed from isolated build request')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def adjust_for_flatpak(self): """ Remove plugins that don't work when building Flatpaks """
if self.user_params.flatpak.value:
    remove_plugins = [
        ("prebuild_plugins", "resolve_composes"),
        # We'll extract the filesystem anyways for a Flatpak instead of exporting
        # the docker image directly, so squash just slows things down.
        ("prepublish_plugins", "squash"),
        # Pulp can't currently handle Flatpaks, which are OCI images
        ("postbuild_plugins", "pulp_push"),
        ("postbuild_plugins", "pulp_tag"),
        ("postbuild_plugins", "pulp_sync"),
        ("exit_plugins", "pulp_publish"),
        ("exit_plugins", "pulp_pull"),
        # delete_from_registry is used for deleting builds from the temporary registry
        # that pulp_sync mirrors from.
        ("exit_plugins", "delete_from_registry"),
    ]
    for when, which in remove_plugins:
        self.pt.remove_plugin(when, which, 'not needed for flatpak build')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_customizations(self): """ Customize template for site user specified customizations """
disable_plugins = self.pt.customize_conf.get('disable_plugins', [])
if not disable_plugins:
    logger.debug('No site-user specified plugins to disable')
else:
    for plugin in disable_plugins:
        try:
            self.pt.remove_plugin(plugin['plugin_type'], plugin['plugin_name'],
                                  'disabled at user request')
        except KeyError:
            # Malformed config
            logger.info('Invalid custom configuration found for disable_plugins')

enable_plugins = self.pt.customize_conf.get('enable_plugins', [])
if not enable_plugins:
    logger.debug('No site-user specified plugins to enable')
else:
    for plugin in enable_plugins:
        try:
            msg = 'enabled at user request'
            self.pt.add_plugin(plugin['plugin_type'], plugin['plugin_name'],
                               plugin['plugin_args'], msg)
        except KeyError:
            # Malformed config
            logger.info('Invalid custom configuration found for enable_plugins')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_koji(self): """ if there is yum repo in user params, don't pick stuff from koji """
phase = 'prebuild_plugins'
plugin = 'koji'
if not self.pt.has_plugin_conf(phase, plugin):
    return

if self.user_params.yum_repourls.value:
    self.pt.remove_plugin(phase, plugin, 'there is a yum repo user parameter')
elif not self.pt.set_plugin_arg_valid(phase, plugin, "target",
                                      self.user_params.koji_target.value):
    self.pt.remove_plugin(phase, plugin, 'no koji target supplied in user parameters')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_check_and_set_platforms(self): """ If the check_and_set_platforms plugin is present, configure it """
phase = 'prebuild_plugins'
plugin = 'check_and_set_platforms'
if not self.pt.has_plugin_conf(phase, plugin):
    return

if self.user_params.koji_target.value:
    self.pt.set_plugin_arg(phase, plugin, "koji_target",
                           self.user_params.koji_target.value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_build_configs_by_labels(self, label_selectors): """ Returns all build configs matching a given set of label selectors. It is up to the calling function to filter the results. """
labels = ['%s=%s' % (field, value) for field, value in label_selectors]
labels = ','.join(labels)
url = self._build_url("buildconfigs/", labelSelector=labels)
return self._get(url).json()['items']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_build_config_by_labels(self, label_selectors): """ Returns a build config matching the given label selectors. This method will raise OsbsException if not exactly one build config is found. """
items = self.get_all_build_configs_by_labels(label_selectors)

if not items:
    raise OsbsException(
        "Build config not found for labels: %r" % (label_selectors, ))
if len(items) > 1:
    raise OsbsException(
        "More than one build config found for labels: %r" % (label_selectors, ))

return items[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_build_config_by_labels_filtered(self, label_selectors, filter_key, filter_value): """ Returns a build config matching the given label selectors, filtering against another predetermined value. This method will raise OsbsException if not exactly one build config is found after filtering. """
items = self.get_all_build_configs_by_labels(label_selectors)

if filter_value is not None:
    build_configs = []
    for build_config in items:
        match_value = graceful_chain_get(build_config, *filter_key.split('.'))
        if filter_value == match_value:
            build_configs.append(build_config)
    items = build_configs

if not items:
    raise OsbsException(
        "Build config not found for labels: %r" % (label_selectors, ))
if len(items) > 1:
    raise OsbsException(
        "More than one build config found for labels: %r" % (label_selectors, ))

return items[0]
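graceful_chain_get is not shown in this section; here is a minimal sketch of the behaviour the dotted filter_key above relies on (walk nested dicts by key, returning None on any miss):

# Minimal stand-in for illustration; the real helper lives elsewhere.
def graceful_chain_get(d, *keys):
    for key in keys:
        try:
            d = d[key]
        except (KeyError, TypeError):
            return None
    return d

build_config = {'metadata': {'labels': {'git-branch': 'master'}}}
print(graceful_chain_get(build_config, *'metadata.labels.git-branch'.split('.')))
# master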
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stream_logs(self, build_id): """ stream logs from build :param build_id: str :return: iterator """
kwargs = {'follow': 1}

# If connection is closed within this many seconds, give up:
min_idle_timeout = 60

# Stream logs, but be careful of the connection closing
# due to idle timeout. In that case, try again until the
# call returns more quickly than a reasonable timeout
# would be set to.
last_activity = time.time()
while True:
    buildlogs_url = self._build_url("builds/%s/log/" % build_id, **kwargs)
    try:
        response = self._get(buildlogs_url, stream=1,
                             headers={'Connection': 'close'})
        check_response(response)
        for line in response.iter_lines():
            last_activity = time.time()
            yield line
    # NOTE1: If self._get causes ChunkedEncodingError, ConnectionError,
    # or IncompleteRead to be raised, they'll be wrapped in
    # OsbsNetworkException or OsbsException
    # NOTE2: If iter_lines causes ChunkedEncodingError
    # or IncompleteRead to be raised, it'll simply be silenced.
    # NOTE3: An exception may be raised from
    # check_response(). In this case, exception will be
    # wrapped in OsbsException or OsbsNetworkException,
    # inspect cause to detect ConnectionError.
    except OsbsException as exc:
        if not isinstance(exc.cause, ConnectionError):
            raise

    idle = time.time() - last_activity
    logger.debug("connection closed after %ds", idle)
    if idle < min_idle_timeout:
        # Finish output
        return

    since = int(idle - 1)
    logger.debug("fetching logs starting from %ds ago", since)
    kwargs['sinceSeconds'] = since
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_builds(self, build_config_id=None, koji_task_id=None, field_selector=None, labels=None): """ List builds matching criteria :param build_config_id: str, only list builds created from BuildConfig :param koji_task_id: str, only list builds for Koji Task ID :param field_selector: str, field selector for query :return: HttpResponse """
query = {}
selector = '{key}={value}'

label = {}
if labels is not None:
    label.update(labels)

if build_config_id is not None:
    label['buildconfig'] = build_config_id

if koji_task_id is not None:
    label['koji-task-id'] = str(koji_task_id)

if label:
    query['labelSelector'] = ','.join([selector.format(key=key, value=value)
                                       for key, value in label.items()])

if field_selector is not None:
    query['fieldSelector'] = field_selector

url = self._build_url("builds/", **query)
return self._get(url)
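For illustration, the labelSelector value assembled above looks like this (made-up label values):

selector = '{key}={value}'
label = {'buildconfig': 'my-component', 'koji-task-id': '123456'}
print(','.join(selector.format(key=key, value=value)
               for key, value in label.items()))
# buildconfig=my-component,koji-task-id=123456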
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_resource_quota(self, name, quota_json): """ Prevent builds being scheduled and wait for running builds to finish. :return: """
url = self._build_k8s_url("resourcequotas/")
response = self._post(url, data=json.dumps(quota_json),
                      headers={"Content-Type": "application/json"})
if response.status_code == http_client.CONFLICT:
    url = self._build_k8s_url("resourcequotas/%s" % name)
    response = self._put(url, data=json.dumps(quota_json),
                         headers={"Content-Type": "application/json"})

check_response(response)
return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def adjust_attributes_on_object(self, collection, name, things, values, how): """ adjust labels or annotations on object labels have to match RE: (([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])? and have at most 63 chars :param collection: str, object collection e.g. 'builds' :param name: str, name of object :param things: str, 'labels' or 'annotations' :param values: dict, values to set :param how: callable, how to adjust the values e.g. self._replace_metadata_things :return: """
url = self._build_url("%s/%s" % (collection, name))
response = self._get(url)
logger.debug("before modification: %s", response.content)
build_json = response.json()
how(build_json['metadata'], things, values)
response = self._put(url, data=json.dumps(build_json), use_json=True)
check_response(response)
return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_annotations_on_build(self, build_id, annotations): """ set annotations on build object :param build_id: str, id of build :param annotations: dict, annotations to set :return: """
return self.adjust_attributes_on_object('builds', build_id,
                                        'annotations', annotations,
                                        self._update_metadata_things)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a Line-delimited JSON file. |load_source_desc_file| :return: Loaded table data iterator. |load_table_name_desc| :rtype: |TableData| iterator :raises pytablereader.DataError: If the data is invalid Line-delimited JSON. :raises pytablereader.error.ValidationError: If the data is not acceptable Line-delimited JSON format. """
formatter = JsonLinesTableFormatter(self.load_dict())
formatter.accept(self)

return formatter.to_table_data()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Load table data from a Google Spreadsheet. This method considers :py:attr:`.source` as a path to the credential JSON file to access Google Sheets API. The method automatically searches for the header row starting from :py:attr:`.start_row`. The condition of the header row is that all of the columns have values (except empty columns). :return: Loaded table data. Return one |TableData| for each sheet in the workbook. The table name for data will be determined by :py:meth:`~.GoogleSheetsTableLoader.make_table_name`. :rtype: iterator of |TableData| :raises pytablereader.DataError: If the header row is not found. :raises pytablereader.OpenError: If the spreadsheet is not found. """
import gspread
from oauth2client.service_account import ServiceAccountCredentials

self._validate_table_name()
self._validate_title()

scope = ["https://spreadsheets.google.com/feeds",
         "https://www.googleapis.com/auth/drive"]
credentials = ServiceAccountCredentials.from_json_keyfile_name(self.source, scope)

gc = gspread.authorize(credentials)

try:
    for worksheet in gc.open(self.title).worksheets():
        self._worksheet = worksheet
        self.__all_values = [row for row in worksheet.get_all_values()]

        if self._is_empty_sheet():
            continue

        try:
            self.__strip_empty_col()
        except ValueError:
            continue

        value_matrix = self.__all_values[self._get_start_row_idx():]
        try:
            headers = value_matrix[0]
            rows = value_matrix[1:]
        except IndexError:
            continue

        self.inc_table_count()

        yield TableData(
            self.make_table_name(),
            headers,
            rows,
            dp_extractor=self.dp_extractor,
            type_hints=self._extract_type_hints(headers),
        )
except gspread.exceptions.SpreadsheetNotFound:
    raise OpenError("spreadsheet '{}' not found".format(self.title))
except gspread.exceptions.APIError as e:
    raise APIError(e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def buildconfig_update(orig, new, remove_nonexistent_keys=False): """Performs update of given `orig` BuildConfig with values from `new` BuildConfig. Both BuildConfigs have to be represented as `dict`s. This function: - adds all key/value pairs to `orig` from `new` that are missing - replaces values in `orig` for keys that are in both - removes key/value pairs from `orig` for keys that are not in `new`, but only in dicts nested inside `strategy` key (see https://github.com/projectatomic/osbs-client/pull/273#issuecomment-148038314) """
if isinstance(orig, dict) and isinstance(new, dict):
    clean_triggers(orig, new)
    if remove_nonexistent_keys:
        missing = set(orig.keys()) - set(new.keys())
        for k in missing:
            orig.pop(k)
    for k, v in new.items():
        if k == 'strategy':
            remove_nonexistent_keys = True
        if isinstance(orig.get(k), dict) and isinstance(v, dict):
            buildconfig_update(orig[k], v, remove_nonexistent_keys)
        else:
            orig[k] = v
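A short walk-through of the merge semantics, assuming the function above plus a no-op stand-in for clean_triggers (which is defined elsewhere):

def clean_triggers(orig, new):
    pass  # no-op stub so the sketch runs standalone

orig = {'kind': 'BuildConfig',
        'spec': {'strategy': {'a': 1, 'stale': True}}}
new = {'spec': {'strategy': {'a': 2}}}
buildconfig_update(orig, new)
# 'stale' is dropped because removal only kicks in inside the 'strategy'
# subtree; 'kind' survives because it sits outside that subtree.
print(orig)  # {'kind': 'BuildConfig', 'spec': {'strategy': {'a': 2}}}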
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkout_git_repo(git_url, target_dir=None, commit=None, retry_times=GIT_MAX_RETRIES, branch=None, depth=None): """ clone provided git repo to target_dir, optionally checkout provided commit yield the ClonedRepoData and delete the repo when finished :param git_url: str, git repo to clone :param target_dir: str, filesystem path where the repo should be cloned :param commit: str, commit to checkout, SHA-1 or ref :param retry_times: int, number of retries for git clone :param branch: str, optional branch of the commit, required if depth is provided :param depth: int, optional expected depth :return: str, int, commit ID of HEAD """
tmpdir = tempfile.mkdtemp()
target_dir = target_dir or os.path.join(tmpdir, "repo")
try:
    yield clone_git_repo(git_url, target_dir, commit, retry_times, branch, depth)
finally:
    shutil.rmtree(tmpdir)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clone_git_repo(git_url, target_dir=None, commit=None, retry_times=GIT_MAX_RETRIES, branch=None, depth=None): """ clone provided git repo to target_dir, optionally checkout provided commit :param git_url: str, git repo to clone :param target_dir: str, filesystem path where the repo should be cloned :param commit: str, commit to checkout, SHA-1 or ref :param retry_times: int, number of retries for git clone :param branch: str, optional branch of the commit, required if depth is provided :param depth: int, optional expected depth :return: str, int, commit ID of HEAD """
retry_delay = GIT_BACKOFF_FACTOR
target_dir = target_dir or os.path.join(tempfile.mkdtemp(), "repo")
commit = commit or "master"
logger.info("cloning git repo '%s'", git_url)
logger.debug("url = '%s', dir = '%s', commit = '%s'", git_url, target_dir, commit)

cmd = ["git", "clone"]
if branch:
    cmd += ["-b", branch, "--single-branch"]
    if depth:
        cmd += ["--depth", str(depth)]
elif depth:
    logger.warning("branch not provided for %s, depth setting ignored", git_url)
    depth = None

cmd += [git_url, target_dir]

logger.debug("cloning '%s'", cmd)
repo_commit = ''
repo_depth = None
for counter in range(retry_times + 1):
    try:
        # we are using check_output, even though we aren't using
        # the return value, but we will get 'output' in exception
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        repo_commit, repo_depth = reset_git_repo(target_dir, commit, depth)
        break
    except subprocess.CalledProcessError as exc:
        if counter != retry_times:
            logger.info("retrying command '%s':\n '%s'", cmd, exc.output)
            time.sleep(retry_delay * (2 ** counter))
        else:
            raise OsbsException("Unable to clone git repo '%s' "
                                "branch '%s'" % (git_url, branch),
                                cause=exc, traceback=sys.exc_info()[2])

return ClonedRepoData(target_dir, repo_commit, repo_depth)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_git_repo(target_dir, git_reference, retry_depth=None): """ hard reset git clone in target_dir to given git_reference :param target_dir: str, filesystem path where the repo is cloned :param git_reference: str, any valid git reference :param retry_depth: int, if the repo was cloned with --shallow, this is the expected depth of the commit :return: str and int, commit ID of HEAD and commit depth of git_reference """
deepen = retry_depth or 0
base_commit_depth = 0
for _ in range(GIT_FETCH_RETRY):
    try:
        if not deepen:
            cmd = ['git', 'rev-list', '--count', git_reference]
            base_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - 1
        cmd = ["git", "reset", "--hard", git_reference]
        logger.debug("Resetting current HEAD: '%s'", cmd)
        subprocess.check_call(cmd, cwd=target_dir)
        break
    except subprocess.CalledProcessError:
        if not deepen:
            raise OsbsException('cannot find commit %s in repo %s' %
                                (git_reference, target_dir))
        deepen *= 2
        cmd = ["git", "fetch", "--depth", str(deepen)]
        subprocess.check_call(cmd, cwd=target_dir)
        logger.debug("Couldn't find commit %s, increasing depth with '%s'",
                     git_reference, cmd)
else:
    raise OsbsException('cannot find commit %s in repo %s' %
                        (git_reference, target_dir))

cmd = ["git", "rev-parse", "HEAD"]
logger.debug("getting SHA-1 of provided ref '%s'", git_reference)
commit_id = subprocess.check_output(cmd, cwd=target_dir, universal_newlines=True)
commit_id = commit_id.strip()
logger.info("commit ID = %s", commit_id)

final_commit_depth = None
if not deepen:
    cmd = ['git', 'rev-list', '--count', 'HEAD']
    final_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - base_commit_depth

return commit_id, final_commit_depth
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_imagestreamtag_from_image(image): """ return ImageStreamTag, given a FROM value :param image: str, the FROM value from the Dockerfile :return: str, ImageStreamTag """
ret = image

# Remove the registry part
ret = strip_registry_from_image(image)

# ImageStream names cannot contain '/'
ret = ret.replace('/', '-')

# If there is no ':' suffix value, add one
if ret.find(':') == -1:
    ret += ":latest"

return ret
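A runnable sketch, assuming the function above and a minimal stand-in for strip_registry_from_image (the real helper lives elsewhere in the codebase):

# Stand-in: drop a leading "host/" component when it looks like a
# registry (contains a dot or a port).
def strip_registry_from_image(image):
    parts = image.split('/', 1)
    if len(parts) == 2 and ('.' in parts[0] or ':' in parts[0]):
        return parts[1]
    return image

print(get_imagestreamtag_from_image('registry.example.com/fedora/httpd:2.4'))
# fedora-httpd:2.4
print(get_imagestreamtag_from_image('fedora/httpd'))
# fedora-httpd:latest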
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_time_from_rfc3339(rfc3339): """ return time tuple from an RFC 3339-formatted time string :param rfc3339: str, time in RFC 3339 format :return: float, seconds since the Epoch """
try:
    # py 3
    dt = dateutil.parser.parse(rfc3339, ignoretz=False)
    return dt.timestamp()
except NameError:
    # py 2
    # Decode the RFC 3339 date with no fractional seconds (the
    # format Origin provides). Note that this will fail to parse
    # valid ISO8601 timestamps not in this exact format.
    time_tuple = strptime(rfc3339, '%Y-%m-%dT%H:%M:%SZ')
    return timegm(time_tuple)
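A quick usage check, assuming the function above with python-dateutil available (the Python 3 branch):

print(get_time_from_rfc3339('2019-03-01T12:00:00Z'))
# 1551441600.0  (12:00 UTC on 2019-03-01)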
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_name_from_git(repo, branch, limit=53, separator='-', hash_size=5): """ return name string representing the given git repo and branch to be used as a build name. NOTE: Build name will be used to generate pods which have a limit of 64 characters and is composed as: <buildname>-<buildnumber>-<podsuffix> rhel7-1-build Assuming '-XXXX' (5 chars) and '-build' (6 chars) as default suffixes, name should be limited to 53 chars (64 - 11). OpenShift is very peculiar in which BuildConfig names it allows. For this reason, only certain characters are allowed. Any disallowed characters will be removed from repo and branch names. :param repo: str, the git repository to be used :param branch: str, the git branch to be used :param limit: int, max name length :param separator: str, used to separate the repo and branch in name :return: str, name representing git repo and branch. """
branch = branch or 'unknown'
full = urlparse(repo).path.lstrip('/') + branch
repo = git_repo_humanish_part_from_uri(repo)
shaval = sha256(full.encode('utf-8')).hexdigest()
hash_str = shaval[:hash_size]
limit = limit - len(hash_str) - 1
sanitized = sanitize_strings_for_openshift(repo, branch, limit, separator, False)
return separator.join(filter(None, (sanitized, hash_str)))
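A standalone check of just the hash-suffix piece (repo URI and branch are illustrative; urllib.parse stands in here for whatever URL parser the module imports):

from hashlib import sha256
from urllib.parse import urlparse

repo = 'https://github.com/containerbuildsystem/osbs-client.git'
branch = 'master'
full = urlparse(repo).path.lstrip('/') + branch
print(sha256(full.encode('utf-8')).hexdigest()[:5])  # first 5 hex digits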
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wrap_name_from_git(prefix, suffix, *args, **kwargs): """ wraps the result of make_name_from_git in a prefix and suffix, adding separators for each. see docstring for make_name_from_git for a full list of parameters """
# 64 is maximum length allowed by OpenShift
# 2 is the number of dashes that will be added
prefix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(prefix)))
suffix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(suffix)))
kwargs['limit'] = kwargs.get('limit', 64) - len(prefix) - len(suffix) - 2
name_from_git = make_name_from_git(*args, **kwargs)
return '-'.join([prefix, name_from_git, suffix])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_name(self, label_type): """ returns the most preferred label name; if there isn't any correct name in the list it will return the newest label name """
if label_type in self._label_values:
    return self._label_values[label_type][0]
else:
    return Labels.LABEL_NAMES[label_type][0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_new_names_by_old(): """Return dictionary, new label name indexed by old label name."""
newdict = {}

for label_type, label_names in Labels.LABEL_NAMES.items():
    for oldname in label_names[1:]:
        newdict[oldname] = Labels.LABEL_NAMES[label_type][0]

return newdict
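For illustration, with a made-up LABEL_NAMES mapping where the first entry of each tuple is the preferred new name and the rest are old aliases:

LABEL_NAMES = {'component': ('com.redhat.component', 'BZComponent')}
newdict = {}
for label_type, label_names in LABEL_NAMES.items():
    for oldname in label_names[1:]:
        newdict[oldname] = LABEL_NAMES[label_type][0]
print(newdict)  # {'BZComponent': 'com.redhat.component'}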
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def kerberos_ccache_init(principal, keytab_file, ccache_file=None): """ Checks whether kerberos credential cache has ticket-granting ticket that is valid for at least an hour. Default ccache is used unless ccache_file is provided. In that case, KRB5CCNAME environment variable is set to the value of ccache_file if we successfully obtain the ticket. """
tgt_valid = False
env = {"LC_ALL": "C"}  # klist uses locales to format date on RHEL7+
if ccache_file:
    env["KRB5CCNAME"] = ccache_file

# check if we have tgt that is valid more than one hour
rc, klist, _ = run(["klist"], extraenv=env)
if rc == 0:
    for line in klist.splitlines():
        m = re.match(KLIST_TGT_RE, line)
        if m:
            year = m.group("year")
            if len(year) == 2:
                year = "20" + year

            expires = datetime.datetime(
                int(year), int(m.group("month")), int(m.group("day")),
                int(m.group("hour")), int(m.group("minute")),
                int(m.group("second"))
            )

            if expires - datetime.datetime.now() > datetime.timedelta(hours=1):
                logger.debug("Valid TGT found, not renewing")
                tgt_valid = True
                break

if not tgt_valid:
    logger.debug("Retrieving kerberos TGT")
    rc, out, err = run(["kinit", "-k", "-t", keytab_file, principal],
                       extraenv=env)
    if rc != 0:
        raise OsbsException("kinit returned %s:\nstdout: %s\nstderr: %s" %
                            (rc, out, err))

if ccache_file:
    os.environ["KRB5CCNAME"] = ccache_file
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def load(self):
    """
    Extract tabular data as |TableData| instances from a SQLite database file.
    |load_source_desc_file|

    :return:
        Loaded table data iterator.
        |load_table_name_desc|

        ===================  ==============================================
        Format specifier     Value after the replacement
        ===================  ==============================================
        ``%(filename)s``     |filename_desc|
        ``%(key)s``          ``%(format_name)s%(format_id)s``
        ``%(format_name)s``  ``"sqlite"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ==============================================
    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the SQLite database file data is invalid or empty.
    """
self._validate()

formatter = SqliteTableFormatter(self.source)
formatter.accept(self)

return formatter.to_table_data()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_data(self): """ Find the data stored in the config_map :return: dict, the json of the data that was passed into the ConfigMap on creation """
data = graceful_chain_get(self.json, "data")
if data is None:
    return {}

data_dict = {}

for key in data:
    if self.is_yaml(key):
        data_dict[key] = yaml.load(data[key])
    else:
        data_dict[key] = json.loads(data[key])

return data_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_data_by_key(self, name): """ Find the object stored by a JSON string at key 'name' :return: str or dict, the json of the str or dict stored in the ConfigMap at that location """
data = graceful_chain_get(self.json, "data")
if data is None or name not in data:
    return {}

if self.is_yaml(name):
    return yaml.load(data[name]) or {}
return json.loads(data[name])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_builds(self, field_selector=None, koji_task_id=None, running=None, labels=None): """ List builds with matching fields :param field_selector: str, field selector for Builds :param koji_task_id: str, only list builds for Koji Task ID :return: BuildResponse list """
if running:
    running_fs = ",".join(["status!={status}".format(status=status.capitalize())
                           for status in BUILD_FINISHED_STATES])
    if not field_selector:
        field_selector = running_fs
    else:
        field_selector = ','.join([field_selector, running_fs])
response = self.os.list_builds(field_selector=field_selector,
                               koji_task_id=koji_task_id, labels=labels)
serialized_response = response.json()
build_list = []
for build in serialized_response["items"]:
    build_list.append(BuildResponse(build, self))
return build_list
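For illustration, the field selector produced for running=True, assuming BUILD_FINISHED_STATES holds the lowercase finished states (values here are illustrative):

BUILD_FINISHED_STATES = ('complete', 'failed', 'cancelled', 'error')
running_fs = ",".join("status!={status}".format(status=status.capitalize())
                      for status in BUILD_FINISHED_STATES)
print(running_fs)
# status!=Complete,status!=Failed,status!=Cancelled,status!=Error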
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_build_request(self, build_type=None, inner_template=None, outer_template=None, customize_conf=None, arrangement_version=DEFAULT_ARRANGEMENT_VERSION): """ return instance of BuildRequest or BuildRequestV2 :param build_type: str, unused :param inner_template: str, name of inner template for BuildRequest :param outer_template: str, name of outer template for BuildRequest :param customize_conf: str, name of customization config for BuildRequest :param arrangement_version: int, value of the arrangement version :return: instance of BuildRequest or BuildRequestV2 """
if build_type is not None:
    warnings.warn("build types are deprecated, do not use the build_type argument")

validate_arrangement_version(arrangement_version)

if not arrangement_version or arrangement_version < REACTOR_CONFIG_ARRANGEMENT_VERSION:
    build_request = BuildRequest(
        build_json_store=self.os_conf.get_build_json_store(),
        inner_template=inner_template,
        outer_template=outer_template,
        customize_conf=customize_conf)
else:
    build_request = BuildRequestV2(
        build_json_store=self.os_conf.get_build_json_store(),
        outer_template=outer_template,
        customize_conf=customize_conf)

# Apply configured resource limits.
cpu_limit = self.build_conf.get_cpu_limit()
memory_limit = self.build_conf.get_memory_limit()
storage_limit = self.build_conf.get_storage_limit()
if (cpu_limit is not None or
        memory_limit is not None or
        storage_limit is not None):
    build_request.set_resource_limits(cpu=cpu_limit,
                                      memory=memory_limit,
                                      storage=storage_limit)

return build_request
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_build_from_buildrequest(self, build_request): """ render provided build_request and submit build from it :param build_request: instance of build.build_request.BuildRequest :return: instance of build.build_response.BuildResponse """
build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version())
build = build_request.render()
response = self.os.create_build(json.dumps(build))
build_response = BuildResponse(response.json(), self)
return build_response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_image_stream_info_for_build_request(self, build_request): """Return ImageStream, and ImageStreamTag name for base_image of build_request If build_request is not auto instantiated, objects are not fetched and None, None is returned. """
image_stream = None
image_stream_tag_name = None

if build_request.has_ist_trigger():
    image_stream_tag_id = build_request.trigger_imagestreamtag
    image_stream_id, image_stream_tag_name = image_stream_tag_id.split(':')

    try:
        image_stream = self.get_image_stream(image_stream_id).json()
    except OsbsResponseException as x:
        if x.status_code != 404:
            raise

    if image_stream:
        try:
            self.get_image_stream_tag(image_stream_tag_id).json()
        except OsbsResponseException as x:
            if x.status_code != 404:
                raise

return image_stream, image_stream_tag_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_prod_build(self, *args, **kwargs): """ Create a production build :param git_uri: str, URI of git repository :param git_ref: str, reference to commit :param git_branch: str, branch name :param user: str, user name :param component: str, not used anymore :param target: str, koji target :param architecture: str, build architecture :param yum_repourls: list, URLs for yum repos :param koji_task_id: int, koji task ID requesting build :param scratch: bool, this is a scratch build :param platform: str, the platform name :param platforms: list<str>, the name of each platform :param release: str, the release value to use :param inner_template: str, name of inner template for BuildRequest :param outer_template: str, name of outer template for BuildRequest :param customize_conf: str, name of customization config for BuildRequest :param arrangement_version: int, numbered arrangement of plugins for orchestration workflow :param signing_intent: str, signing intent of the ODCS composes :param compose_ids: list<int>, ODCS composes used :return: BuildResponse instance """
logger.warning("prod (all-in-one) builds are deprecated, " "please use create_orchestrator_build " "(support will be removed in version 0.54)") return self._do_create_prod_build(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_worker_build(self, **kwargs): """ Create a worker build Pass through method to create_prod_build with the following modifications: - platform param is required - release param is required - arrangement_version param is required, which is used to select which worker_inner:n.json template to use - inner template set to worker_inner:n.json if not set - outer template set to worker.json if not set - customize configuration set to worker_customize.json if not set :return: BuildResponse instance """
missing = set()
for required in ('platform', 'release', 'arrangement_version'):
    if not kwargs.get(required):
        missing.add(required)

if missing:
    raise ValueError("Worker build missing required parameters: %s" % missing)

if kwargs.get('platforms'):
    raise ValueError("Worker build called with unwanted platforms param")

arrangement_version = kwargs['arrangement_version']

kwargs.setdefault('inner_template', WORKER_INNER_TEMPLATE.format(
    arrangement_version=arrangement_version))
kwargs.setdefault('outer_template', WORKER_OUTER_TEMPLATE)
kwargs.setdefault('customize_conf', WORKER_CUSTOMIZE_CONF)
kwargs['build_type'] = BUILD_TYPE_WORKER
try:
    return self._do_create_prod_build(**kwargs)
except IOError as ex:
    if os.path.basename(ex.filename) == kwargs['inner_template']:
        raise OsbsValidationException("worker invalid arrangement_version %s" %
                                      arrangement_version)
    raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_orchestrator_build(self, **kwargs): """ Create an orchestrator build Pass through method to create_prod_build with the following modifications: - platforms param is required - arrangement_version param may be used to select which orchestrator_inner:n.json template to use - inner template set to orchestrator_inner:n.json if not set - outer template set to orchestrator.json if not set - customize configuration set to orchestrator_customize.json if not set :return: BuildResponse instance """
if not self.can_orchestrate():
    raise OsbsOrchestratorNotEnabled("can't create orchestrate build "
                                     "when can_orchestrate isn't enabled")

extra = [x for x in ('platform',) if kwargs.get(x)]
if extra:
    raise ValueError("Orchestrator build called with unwanted parameters: %s" % extra)

arrangement_version = kwargs.setdefault('arrangement_version',
                                        self.build_conf.get_arrangement_version())

if arrangement_version < REACTOR_CONFIG_ARRANGEMENT_VERSION and not kwargs.get('platforms'):
    raise ValueError('Orchestrator build requires platforms param')

kwargs.setdefault('inner_template', ORCHESTRATOR_INNER_TEMPLATE.format(
    arrangement_version=arrangement_version))
kwargs.setdefault('outer_template', ORCHESTRATOR_OUTER_TEMPLATE)
kwargs.setdefault('customize_conf', ORCHESTRATOR_CUSTOMIZE_CONF)
kwargs['build_type'] = BUILD_TYPE_ORCHESTRATOR
try:
    return self._do_create_prod_build(**kwargs)
except IOError as ex:
    if os.path.basename(ex.filename) == kwargs['inner_template']:
        raise OsbsValidationException("orchestrator invalid arrangement_version %s" %
                                      arrangement_version)
    raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_orchestrator_build_logs(self, build_id, follow=False, wait_if_missing=False): """ provide logs from orchestrator build :param build_id: str :param follow: bool, fetch logs as they come? :param wait_if_missing: bool, if build doesn't exist, wait :return: generator yielding objects with attributes 'platform' and 'line' """
logs = self.get_build_logs(build_id=build_id, follow=follow,
                           wait_if_missing=wait_if_missing, decode=True)
if logs is None:
    return

if isinstance(logs, GeneratorType):
    for entries in logs:
        for entry in entries.splitlines():
            yield LogEntry(*self._parse_build_log_entry(entry))
else:
    for entry in logs.splitlines():
        yield LogEntry(*self._parse_build_log_entry(entry))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_image_tags(self, name, tags, repository, insecure=False): """Import image tags from specified container repository. :param name: str, name of ImageStream object :param tags: iterable, tags to be imported :param repository: str, remote location of container image in the format <registry>/<repository> :param insecure: bool, indicates whether the registry is insecure :return: bool, whether tags were imported """
stream_import_file = os.path.join(self.os_conf.get_build_json_store(),
                                  'image_stream_import.json')
with open(stream_import_file) as f:
    stream_import = json.load(f)
return self.os.import_image_tags(name, stream_import, tags,
                                 repository, insecure)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ensure_image_stream_tag(self, stream, tag_name, scheduled=False, source_registry=None, organization=None, base_image=None): """Ensures the tag is monitored in ImageStream :param stream: dict, ImageStream object :param tag_name: str, name of tag to check, without name of ImageStream as prefix :param scheduled: bool, if True, importPolicy.scheduled will be set to True in ImageStreamTag :param source_registry: dict, info about source registry :param organization: str, organization for registry :param base_image: str, base image :return: bool, whether or not modifications were performed """
img_stream_tag_file = os.path.join(self.os_conf.get_build_json_store(),
                                   'image_stream_tag.json')
with open(img_stream_tag_file) as f:
    tag_template = json.load(f)

repository = None
registry = None
insecure = False
if source_registry:
    registry = RegistryURI(source_registry['url']).docker_uri
    insecure = source_registry.get('insecure', False)
if base_image and registry:
    repository = self._get_enclosed_repo_with_source_registry(base_image, registry,
                                                              organization)

return self.os.ensure_image_stream_tag(stream, tag_name, tag_template, scheduled,
                                       repository=repository, insecure=insecure)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_image_stream(self, name, docker_image_repository, insecure_registry=False): """ Create an ImageStream object Raises exception on error :param name: str, name of ImageStream :param docker_image_repository: str, pull spec for docker image repository :param insecure_registry: bool, whether plain HTTP should be used :return: response """
img_stream_file = os.path.join(self.os_conf.get_build_json_store(), 'image_stream.json')
with open(img_stream_file) as f:
    stream = json.load(f)
stream['metadata']['name'] = name
stream['metadata'].setdefault('annotations', {})
stream['metadata']['annotations'][ANNOTATION_SOURCE_REPO] = docker_image_repository

if insecure_registry:
    stream['metadata']['annotations'][ANNOTATION_INSECURE_REPO] = 'true'

return self.os.create_image_stream(json.dumps(stream))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_compression_extension(self): """ Find the filename extension for the 'docker save' output, which may or may not be compressed. Raises OsbsValidationException if the extension cannot be determined due to a configuration error. :returns: str including leading dot, or else None if no compression """
build_request = BuildRequest(build_json_store=self.os_conf.get_build_json_store())
inner = build_request.inner_template
postbuild_plugins = inner.get('postbuild_plugins', [])
for plugin in postbuild_plugins:
    if plugin.get('name') == 'compress':
        args = plugin.get('args', {})
        method = args.get('method', 'gzip')
        if method == 'gzip':
            return '.gz'
        elif method == 'lzma':
            return '.xz'
        raise OsbsValidationException("unknown compression method '%s'" % method)

return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_config_map(self, name, data): """ Create a ConfigMap object on the server Raises exception on error :param name: str, name of configMap :param data: dict, dictionary of data to be stored :returns: ConfigMapResponse containing the ConfigMap with name and data """
config_data_file = os.path.join(self.os_conf.get_build_json_store(), 'config_map.json')
with open(config_data_file) as f:
    config_data = json.load(f)

config_data['metadata']['name'] = name
data_dict = {}
for key, value in data.items():
    data_dict[key] = json.dumps(value)
config_data['data'] = data_dict

response = self.os.create_config_map(config_data)
config_map_response = ConfigMapResponse(response.json())
return config_map_response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_config_map(self, name): """ Get a ConfigMap object from the server Raises exception on error :param name: str, name of configMap to get from the server :returns: ConfigMapResponse containing the ConfigMap with the requested name """
response = self.os.get_config_map(name)
config_map_response = ConfigMapResponse(response.json())
return config_map_response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wipe(self): """ Wipe the bolt database. Calling this after HoverPy has been instantiated is potentially dangerous. This function is mostly used internally for unit tests. """
try:
    # os.path.isfile, not os.isfile (which does not exist)
    if os.path.isfile(self._dbpath):
        os.remove(self._dbpath)
except OSError:
    pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def metadata(self, delete=False): """ Gets the metadata. """
if delete:
    return self._session.delete(self.__v1() + "/metadata").json()
else:
    return self._session.get(self.__v1() + "/metadata").json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addDelay(self, urlPattern="", delay=0, httpMethod=None): """ Adds delays. """
print("addDelay is deprecated please use delays instead") delay = {"urlPattern": urlPattern, "delay": delay} if httpMethod: delay["httpMethod"] = httpMethod return self.delays(delays={"data": [delay]})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __enableProxy(self): """ Set the required environment variables to enable the use of hoverfly as a proxy. """
os.environ[ "HTTP_PROXY"] = self.httpProxy() os.environ[ "HTTPS_PROXY"] = self.httpsProxy() os.environ["REQUESTS_CA_BUNDLE"] = os.path.join( os.path.dirname( os.path.abspath(__file__)), "cert.pem")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __writepid(self, pid): """ HoverFly fails to launch if it's already running on the same ports. So we have to keep track of them using temp files with the proxy port and admin port, containing the process's PID. """
import tempfile
d = tempfile.gettempdir()
name = os.path.join(d, "hoverpy.%i.%i" % (self._proxyPort, self._adminPort))
with open(name, 'w') as f:
    f.write(str(pid))
logging.debug("writing to %s" % name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __rmpid(self): """ Remove the PID file on shutdown, unfortunately this may not get called if not given the time to shut down. """
import tempfile
d = tempfile.gettempdir()
name = os.path.join(d, "hoverpy.%i.%i" % (self._proxyPort, self._adminPort))
if os.path.exists(name):
    os.unlink(name)
    logging.debug("deleting %s" % name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __start(self): """ Start the hoverfly process. This function waits until it can make contact with the hoverfly API before returning. """
logging.debug("starting %i" % id(self)) self.__kill_if_not_shut_properly() self.FNULL = open(os.devnull, 'w') flags = self.__flags() cmd = [hoverfly] + flags if self._showCmd: print(cmd) self._process = Popen( [hoverfly] + flags, stdin=self.FNULL, stdout=self.FNULL, stderr=subprocess.STDOUT) start = time.time() while time.time() - start < 1: try: url = "http://%s:%i/api/health" % (self._host, self._adminPort) r = self._session.get(url) j = r.json() up = "message" in j and "healthy" in j["message"] if up: logging.debug("has pid %i" % self._process.pid) self.__writepid(self._process.pid) return self._process else: time.sleep(1/100.0) except: # import traceback # traceback.print_exc() # wait 10 ms before trying again time.sleep(1/100.0) pass logging.error("Could not start hoverfly!") raise ValueError("Could not start hoverfly!")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __stop(self): """ Stop the hoverfly process. """
if logging:
    logging.debug("stopping")
self._process.terminate()
# communicate means we wait until the process
# was actually terminated, this removes some
# warnings in python3
self._process.communicate()
self._process = None
self.FNULL.close()
self.FNULL = None
self.__disableProxy()
# del self._session
# self._session = None
self.__rmpid()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __flags(self): """ Internal method. Turns arguments into flags. """
flags = []
if self._capture:
    flags.append("-capture")
if self._spy:
    flags.append("-spy")
if self._dbpath:
    flags += ["-db-path", self._dbpath]
    flags += ["-db", "boltdb"]
else:
    flags += ["-db", "memory"]
if self._synthesize:
    assert self._middleware
    flags += ["-synthesize"]
if self._simulation:
    flags += ["-import", self._simulation]
if self._proxyPort:
    flags += ["-pp", str(self._proxyPort)]
if self._adminPort:
    flags += ["-ap", str(self._adminPort)]
if self._modify:
    flags += ["-modify"]
if self._verbose:
    flags += ["-v"]
if self._dev:
    flags += ["-dev"]
if self._metrics:
    flags += ["-metrics"]
if self._auth:
    flags += ["-auth"]
if self._middleware:
    flags += ["-middleware", self._middleware]
if self._cert:
    flags += ["-cert", self._cert]
if self._certName:
    flags += ["-cert-name", self._certName]
if self._certOrg:
    flags += ["-cert-org", self._certOrg]
if self._destination:
    flags += ["-destination", self._destination]
if self._key:
    flags += ["-key", self._key]
for dest in self._dest or []:
    flags += ["-dest", dest]
if self._generateCACert:
    flags += ["-generate-ca-cert"]
if not self._tlsVerification:
    flags += ["-tls-verification", "false"]
logging.debug("flags: " + str(flags))
return flags
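For orientation, a hedged sketch of the command line this assembles for a capture-mode instance; the flag names mirror the branches above, while the port values are hypothetical:

# Hypothetical resulting invocation for a capture-mode instance:
#   hoverfly -capture -db memory -pp 8500 -ap 8888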
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a JSON file. |load_source_desc_file| This method can be loading four types of JSON formats: **(1)** Single table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (1): single table { "type": "array", "items": { "type": "object", "additionalProperties": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (1) [ {"attr_b": 4, "attr_c": "a", "attr_a": 1}, {"attr_b": 2.1, "attr_c": "bb", "attr_a": 2}, {"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3} ] The example data will be loaded as the following tabular data: .. table:: +------+------+------+ |attr_a|attr_b|attr_c| +======+======+======+ | 1| 4.0|a | +------+------+------+ | 2| 2.1|bb | +------+------+------+ | 3| 120.9|ccc | +------+------+------+ **(2)** Single table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (2): single table { "type": "object", "additionalProperties": { "type": "array", "items": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (2) { "attr_a": [1, 2, 3], "attr_b": [4, 2.1, 120.9], "attr_c": ["a", "bb", "ccc"] } The example data will be loaded as the following tabular data: .. table:: +------+------+------+ |attr_a|attr_b|attr_c| +======+======+======+ | 1| 4.0|a | +------+------+------+ | 2| 2.1|bb | +------+------+------+ | 3| 120.9|ccc | +------+------+------+ **(3)** Single table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (3): single table { "type": "object", "additionalProperties": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (3) { "num_ratings": 27, "support_threads": 1, "downloaded": 925716, "last_updated":"2017-12-01 6:22am GMT", "added":"2010-01-20", "num": 1.1, "hoge": null } The example data will be loaded as the following tabular data: .. table:: | key | value | +===============+=====================+ |num_ratings | 27| |support_threads| 1| |downloaded | 925716| |last_updated |2017-12-01 6:22am GMT| |added |2010-01-20 | |num | 1.1| |hoge |None | **(4)** Multiple table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (4): multiple tables { "type": "object", "additionalProperties": { "type": "array", "items": { "type": "object", "additionalProperties": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (4) { "table_a" : [ {"attr_b": 4, "attr_c": "a", "attr_a": 1}, {"attr_b": 2.1, "attr_c": "bb", "attr_a": 2}, {"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3} ], "table_b" : [ {"a": 1, "b": 4}, {"a": 2 }, {"a": 3, "b": 120.9} ] } The example data will be loaded as the following tabular data: .. table:: table_a +------+------+------+ |attr_a|attr_b|attr_c| +======+======+======+ | 1| 4.0|a | +------+------+------+ | 2| 2.1|bb | +------+------+------+ | 3| 120.9|ccc | +------+------+------+ .. 
table:: table_b +-+-----+ |a| b | +=+=====+ |1| 4.0| +-+-----+ |2| None| +-+-----+ |3|120.9| +-+-----+ **(5)** Multiple table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (5): multiple tables { "type": "object", "additionalProperties": { "type": "object", "additionalProperties": { "type": "array", "items": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (5) { "table_a" : { "attr_a": [1, 2, 3], "attr_b": [4, 2.1, 120.9], "attr_c": ["a", "bb", "ccc"] }, "table_b" : { "a": [1, 3], "b": [4, 120.9] } } The example data will be loaded as the following tabular data: .. table:: table_a +------+------+------+ |attr_a|attr_b|attr_c| +======+======+======+ | 1| 4.0|a | +------+------+------+ | 2| 2.1|bb | +------+------+------+ | 3| 120.9|ccc | +------+------+------+ .. table:: table_b +-+-----+ |a| b | +=+=====+ |1| 4.0| +-+-----+ |3|120.9| +-+-----+ **(6)** Multiple table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (6): multiple tables { "type": "object", "additionalProperties": { "type": "object", "additionalProperties": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (6) { "table_a": { "num_ratings": 27, "support_threads": 1, "downloaded": 925716, "last_updated":"2017-12-01 6:22am GMT", "added":"2010-01-20", "num": 1.1, "hoge": null }, "table_b": { "a": 4, "b": 120.9 } } The example data will be loaded as the following tabular data: .. table:: table_a | key | value | +===============+=====================+ |num_ratings | 27| |support_threads| 1| |downloaded | 925716| |last_updated |2017-12-01 6:22am GMT| |added |2010-01-20 | |num | 1.1| |hoge |None | .. table:: table_b +---+-----+ |key|value| +===+=====+ |a | 4.0| +---+-----+ |b |120.9| +---+-----+ :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` |filename_desc| ``%(key)s`` | This replaced the different value | for each single/multiple JSON tables: | [single JSON table] | ``%(format_name)s%(format_id)s`` | [multiple JSON table] Table data key. ``%(format_name)s`` ``"json"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the data is invalid JSON. :raises pytablereader.error.ValidationError: If the data is not acceptable JSON format. """
formatter = JsonTableFormatter(self.load_dict())
formatter.accept(self)

return formatter.to_table_data()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a MediaWiki file. |load_source_desc_file| :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` |filename_desc| ``%(key)s`` | This replaced to: | **(1)** ``caption`` mark of the table | **(2)** ``%(format_name)s%(format_id)s`` | if ``caption`` mark not included | in the table. ``%(format_name)s`` ``"mediawiki"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the MediaWiki data is invalid or empty. """
self._validate()
self._logger.logging_load()

self.encoding = get_file_encoding(self.source, self.encoding)

with io.open(self.source, "r", encoding=self.encoding) as fp:
    formatter = MediaWikiTableFormatter(fp.read())
formatter.accept(self)

return formatter.to_table_data()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a MediaWiki text object. |load_source_desc_text| :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` ``""`` ``%(key)s`` | This replaced to: | **(1)** ``caption`` mark of the table | **(2)** ``%(format_name)s%(format_id)s`` | if ``caption`` mark not included | in the table. ``%(format_name)s`` ``"mediawiki"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the MediaWiki data is invalid or empty. """
self._validate()
self._logger.logging_load()

formatter = MediaWikiTableFormatter(self.source)
formatter.accept(self)

return formatter.to_table_data()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_dock_json(self): """ return dock json from existing build json """
env_json = self.build_json['spec']['strategy']['customStrategy']['env']
try:
    p = [env for env in env_json if env["name"] == "ATOMIC_REACTOR_PLUGINS"]
except TypeError:
    raise RuntimeError("\"env\" is not iterable")

if len(p) <= 0:
    raise RuntimeError("\"env\" misses key ATOMIC_REACTOR_PLUGINS")

dock_json_str = p[0]['value']
dock_json = json.loads(dock_json_str)
return dock_json
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_container_image_ids(self): """ Find the image IDs the containers use. :return: dict, image tag to docker ID """
statuses = graceful_chain_get(self.json, "status", "containerStatuses")
if statuses is None:
    return {}

def remove_prefix(image_id, prefix):
    if image_id.startswith(prefix):
        return image_id[len(prefix):]
    return image_id

return {status['image']: remove_prefix(status['imageID'], 'docker://')
        for status in statuses}
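The imageID values reported by the cluster carry a docker:// scheme prefix; a standalone sketch of the stripping rule used above:

def strip_scheme(image_id, prefix="docker://"):
    return image_id[len(prefix):] if image_id.startswith(prefix) else image_id

assert strip_scheme("docker://f1a2b3") == "f1a2b3"
assert strip_scheme("f1a2b3") == "f1a2b3"  # already-bare ids pass through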
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_failure_reason(self): """ Find the reason a pod failed :return: dict, which will always have key 'reason': reason: brief reason for state containerID (if known): ID of container exitCode (if known): numeric exit code """
reason_key = 'reason'
cid_key = 'containerID'
exit_key = 'exitCode'

pod_status = self.json.get('status', {})
statuses = pod_status.get('containerStatuses', [])

# Find the first non-zero exit code from a container
# and return its 'message' or 'reason' value
for status in statuses:
    try:
        terminated = status['state']['terminated']
        exit_code = terminated['exitCode']
        if exit_code != 0:
            reason_dict = {
                exit_key: exit_code,
            }
            if 'containerID' in terminated:
                reason_dict[cid_key] = terminated['containerID']

            for key in ['message', 'reason']:
                try:
                    reason_dict[reason_key] = terminated[key]
                    break
                except KeyError:
                    continue
            else:
                # Both 'message' and 'reason' are missing
                reason_dict[reason_key] = 'Exit code {code}'.format(
                    code=exit_code
                )

            return reason_dict
    except KeyError:
        continue

# Failing that, return the 'message' or 'reason' value for the pod
for key in ['message', 'reason']:
    try:
        return {reason_key: pod_status[key]}
    except KeyError:
        continue

return {reason_key: pod_status['phase']}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_error_message(self): """ Return an error message based on atomic-reactor's metadata """
error_reason = self.get_error_reason()
if error_reason:
    error_message = error_reason.get('pod') or None
    if error_message:
        return "Error in pod: %s" % error_message

    plugin = error_reason.get('plugin')[0] or None
    error_message = error_reason.get('plugin')[1] or None
    if error_message:
        # Plugin has a non-empty error description
        return "Error in plugin %s: %s" % (plugin, error_message)
    else:
        return "Error in plugin %s" % plugin
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from an Excel file. |spreadsheet_load_desc| :return: Loaded |TableData| iterator. |TableData| created for each sheet in the workbook. |load_table_name_desc| =================== ==================================== Format specifier Value after the replacement =================== ==================================== ``%(filename)s`` Filename of the workbook ``%(sheet)s`` Name of the sheet ``%(format_name)s`` ``"spreadsheet"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ==================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the header row is not found. :raises pytablereader.error.OpenError: If failed to open the source file. """
import xlrd

self._validate()
self._logger.logging_load()

try:
    workbook = xlrd.open_workbook(self.source)
except xlrd.biffh.XLRDError as e:
    raise OpenError(e)

for worksheet in workbook.sheets():
    self._worksheet = worksheet

    if self._is_empty_sheet():
        continue

    self.__extract_not_empty_col_idx()

    try:
        start_row_idx = self._get_start_row_idx()
    except DataError:
        continue

    rows = [
        self.__get_row_values(row_idx)
        for row_idx in range(start_row_idx + 1, self._row_count)
    ]

    self.inc_table_count()
    headers = self.__get_row_values(start_row_idx)

    yield TableData(
        self._make_table_name(),
        headers,
        rows,
        dp_extractor=self.dp_extractor,
        type_hints=self._extract_type_hints(headers),
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a LTSV file. |load_source_desc_file| :return: Loaded table data. |load_table_name_desc| =================== ======================================== Format specifier Value after the replacement =================== ======================================== ``%(filename)s`` |filename_desc| ``%(format_name)s`` ``"ltsv"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ======================================== :rtype: |TableData| iterator :raises pytablereader.InvalidHeaderNameError: If an invalid label name is included in the LTSV file. :raises pytablereader.DataError: If the LTSV data is invalid. """
self._validate()
self._logger.logging_load()

self.encoding = get_file_encoding(self.source, self.encoding)
self._ltsv_input_stream = io.open(self.source, "r", encoding=self.encoding)

for data_matrix in self._to_data_matrix():
    formatter = SingleJsonTableConverterA(data_matrix)
    formatter.accept(self)

    return formatter.to_table_data()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a LTSV text object. |load_source_desc_text| :return: Loaded table data. |load_table_name_desc| =================== ======================================== Format specifier Value after the replacement =================== ======================================== ``%(filename)s`` ``""`` ``%(format_name)s`` ``"ltsv"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ======================================== :rtype: |TableData| iterator :raises pytablereader.InvalidHeaderNameError: If an invalid label name is included in the LTSV file. :raises pytablereader.DataError: If the LTSV data is invalid. """
self._validate()
self._logger.logging_load()

self._ltsv_input_stream = self.source.splitlines()

for data_matrix in self._to_data_matrix():
    formatter = SingleJsonTableConverterA(data_matrix)
    formatter.accept(self)

    return formatter.to_table_data()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encode_args(args, extra=False): """ Encode a list of arguments """
if not args:
    return ''

methodargs = ', '.join([encode(a) for a in args])
if extra:
    methodargs += ', '
return methodargs
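A hedged demonstration; the module's real encode() is defined elsewhere, so json.dumps stands in for it here and the exact quoting is an assumption:

import json

def encode(a):
    # stand-in serializer, an assumption for this sketch only
    return json.dumps(a)

def encode_args(args, extra=False):  # mirror of the function above
    if not args:
        return ''
    methodargs = ', '.join([encode(a) for a in args])
    if extra:
        methodargs += ', '
    return methodargs

print(encode_args(['#email', True]))        # "#email", true
print(encode_args(['#email'], extra=True))  # "#email",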
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fill(self, field, value): """ Fill a specified form field in the current document. :param field: an instance of :class:`zombie.dom.DOMNode` :param value: any string value :return: self to allow function chaining. """
self.client.nowait('browser.fill', (field, value))
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def value(self, value): """ Used to set the ``value`` of form elements. """
self.client.nowait(
    'set_field', (Literal('browser'), self.element, value))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fire(self, event): """ Fires a specified DOM event on the current node. :param event: the name of the event to fire (e.g., 'click'). Returns the :class:`zombie.dom.DOMNode` to allow function chaining. """
self.browser.fire(self.element, event)
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _utf8_encode(self, d): """ Ensures all values are encoded in UTF-8 and converts them to lowercase """
for k, v in d.items():
    if isinstance(v, str):
        d[k] = v.encode('utf8').lower()
    if isinstance(v, list):
        for index, item in enumerate(v):
            v[index] = item.encode('utf8').lower()
    if isinstance(v, dict):
        d[k] = self._utf8_encode(v)
return d
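A standalone sketch of the same transform for a single value; note that under Python 3, .encode('utf8') produces bytes, so the sketch assumes callers expect bytes values (the helper predates Python 3):

def utf8_lower(value):
    # assumption: bytes output is acceptable to the caller
    return value.encode('utf8').lower() if isinstance(value, str) else value

print(utf8_lower("HeLLo"))  # b'hello'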
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bool_encode(self, d): """ Converts bool values to lowercase strings """
for k, v in d.items():
    if isinstance(v, bool):
        d[k] = str(v).lower()
return d
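A non-destructive, self-contained sketch of the same rule (booleans become lowercase strings, everything else passes through):

def bool_encode(d):
    return {k: (str(v).lower() if isinstance(v, bool) else v) for k, v in d.items()}

print(bool_encode({"verified": True, "count": 1}))  # {'verified': 'true', 'count': 1}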
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_rosters(self): """ Parse the home and away game rosters :returns: ``self`` on success, ``None`` otherwise """
lx_doc = self.html_doc()
if not self.__blocks:
    self.__pl_blocks(lx_doc)
for t in ['home', 'away']:
    self.rosters[t] = self.__clean_pl_block(self.__blocks[t])
return self if self.rosters else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_scratches(self): """ Parse the home and away healthy scratches :returns: ``self`` on success, ``None`` otherwise """
lx_doc = self.html_doc()
if not self.__blocks:
    self.__pl_blocks(lx_doc)
for t in ['aw_scr', 'h_scr']:
    ix = 'away' if t == 'aw_scr' else 'home'
    self.scratches[ix] = self.__clean_pl_block(self.__blocks[t])
return self if self.scratches else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_coaches(self): """ Parse the home and away coaches :returns: ``self`` on success, ``None`` otherwise """
lx_doc = self.html_doc()
tr = lx_doc.xpath('//tr[@id="HeadCoaches"]')[0]
for i, td in enumerate(tr):
    txt = td.xpath('.//text()')
    txt = ex_junk(txt, ['\n', '\r'])
    team = 'away' if i == 0 else 'home'
    self.coaches[team] = txt[0]
return self if self.coaches else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_officials(self): """ Parse the officials :returns: ``self`` on success, ``None`` otherwise """
lx_doc = self.html_doc()
off_parser = opm(self.game_key.season)
self.officials = off_parser(lx_doc)
return self if self.officials else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_valid(self, domain, diagnose=False): """Check whether a domain has a valid MX or A record. Keyword arguments: domain --- the domain to check diagnose --- flag to report a diagnosis or a boolean (default False) """
return_status = [ValidDiagnosis()] dns_checked = False # http://tools.ietf.org/html/rfc5321#section-2.3.5 # Names that can be resolved to MX RRs or address (i.e., A or AAAA) # RRs (as discussed in Section 5) are permitted, as are CNAME RRs # whose targets can be resolved, in turn, to MX or address RRs. # # http://tools.ietf.org/html/rfc5321#section-5.1 # The lookup first attempts to locate an MX record associated with # the name. If a CNAME record is found, the resulting name is # processed as if it were the initial name. ... If an empty list of # MXs is returned, the address is treated as if it was associated # with an implicit MX RR, with a preference of 0, pointing to that # host. # # is_email() author's note: We will regard the existence of a CNAME to # be sufficient evidence of the domain's existence. For performance # reasons we will not repeat the DNS lookup for the CNAME's target, but # we will raise a warning because we didn't immediately find an MX # record. try: dns.resolver.query(domain, 'MX') dns_checked = True except (dns.resolver.NXDOMAIN, dns.name.NameTooLong): # Domain can't be found in DNS return_status.append(DNSDiagnosis('NO_RECORD')) # Since dns.resolver gives more information than the PHP analog, we # can say that TLDs that throw an NXDOMAIN or NameTooLong error # have been checked if len(domain.split('.')) == 1: dns_checked = True except dns.resolver.NoAnswer: # MX-record for domain can't be found return_status.append(DNSDiagnosis('NO_MX_RECORD')) try: # TODO: See if we can/need to narrow to A / CNAME dns.resolver.query(domain) except dns.resolver.NoAnswer: # No usable records for the domain can be found return_status.append(DNSDiagnosis('NO_RECORD')) except dns.resolver.NoNameservers: return_status.append(DNSDiagnosis('NO_NAMESERVERS')) except (dns.exception.Timeout, dns.resolver.Timeout): return_status.append(DNSDiagnosis('DNS_TIMEDOUT')) # Check for TLD addresses # ----------------------- # TLD addresses are specifically allowed in RFC 5321 but they are # unusual to say the least. We will allocate a separate # status to these addresses on the basis that they are more likely # to be typos than genuine addresses (unless we've already # established that the domain does have an MX record) # # http://tools.ietf.org/html/rfc5321#section-2.3.5 # In the case of a top-level domain used by itself in an address, a # single string is used without any dots. This makes the requirement, # described in more detail below, that only fully-qualified domain # names appear in SMTP transactions on the public Internet, # particularly important where top-level domains are involved. # # TLD format # ---------- # The format of TLDs has changed a number of times. The standards # used by IANA have been largely ignored by ICANN, leading to # confusion over the standards being followed. These are not defined # anywhere, except as a general component of a DNS host name (a label). # However, this could potentially lead to 123.123.123.123 being a # valid DNS name (rather than an IP address) and thereby creating # an ambiguity. The most authoritative statement on TLD formats that # the author can find is in a (rejected!) erratum to RFC 1123 # submitted by John Klensin, the author of RFC 5321: # # http://www.rfc-editor.org/errata_search.php?rfc=1123&eid=1353 # However, a valid host name can never have the dotted-decimal # form #.#.#.#, since this change does not permit the highest-level # component label to start with a digit even if it is not # all-numeric. 
if not dns_checked: atom_list = domain.split(".") if len(atom_list) == 1: return_status.append(RFC5321Diagnosis('TLD')) try: float(atom_list[len(atom_list)-1][0]) return_status.append(RFC5321Diagnosis('TLDNUMERIC')) except ValueError: pass final_status = max(return_status) return final_status if diagnose else final_status == ValidDiagnosis()
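Since the logic leans on dnspython's resolver raising typed exceptions, a hedged usage sketch may help; it assumes dnspython 1.x, where the entry point is dns.resolver.query (renamed to resolve in 2.x), and a working network connection:

import dns.resolver

try:
    answers = dns.resolver.query("example.com", "MX")
    print([r.exchange.to_text() for r in answers])  # mail exchanger host names
except dns.resolver.NoAnswer:
    print("no MX record")
except dns.resolver.NXDOMAIN:
    print("domain does not exist")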
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_plays_stream(self): """Generate and yield a stream of parsed plays. Useful for per play processing."""
lx_doc = self.html_doc()
if lx_doc is not None:
    parser = PlayParser(self.game_key.season, self.game_key.game_type)
    plays = lx_doc.xpath('//tr[@class = "evenColor"]')
    for p in plays:
        p_obj = parser.build_play(p)
        self.plays.append(p_obj)
        yield p_obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stack_files(files, hemi, source, target): """ This function takes a list of files as input and vstacks them """
import os

import numpy as np

fname = "sdist_%s_%s_%s.csv" % (hemi, source, target)
filename = os.path.join(os.getcwd(), fname)

alldist = []
for dfile in files:
    alldist.append(np.genfromtxt(dfile, delimiter=','))
alldist = np.array(alldist)
alldist.tofile(filename, ",")

return filename
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __html_rep(self, game_key, rep_code): """Retrieves the nhl html reports for the specified game and report code"""
seas, gt, num = game_key.to_tuple()
url = [
    self.__domain,
    "scores/htmlreports/",
    str(seas - 1), str(seas), "/",
    rep_code, "0", str(gt),
    ("%04i" % num), ".HTM"
]
url = ''.join(url)
return self.__open(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_char(token): """Transforms the ASCII control character symbols to their real char. Note: If the token is not an ASCII control character symbol, just return the token. Keyword arguments: token -- the token to transform """
if ord(token) in _range(9216, 9229 + 1):
    token = _unichr(ord(token) - 9216)

return token
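A quick standalone check of the arithmetic: the Unicode "control pictures" block starts at U+2400 (9216 decimal), so subtracting 9216 recovers the real control character:

print(hex(ord('\u2400')))               # 0x2400, the symbol for NUL
print(repr(chr(ord('\u2400') - 9216)))  # '\x00', the actual NUL character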
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_freesurfer_label(annot_input, label_name, cortex=None): """ Get source node list for a specified freesurfer label. Inputs ------- annot_input : freesurfer annotation label file label_name : freesurfer label name cortex : not used """
if cortex is not None:
    print("Warning: cortex is not used to load the freesurfer label")

labels, color_table, names = nib.freesurfer.read_annot(annot_input)
names = [i.decode('utf-8') for i in names]
label_value = names.index(label_name)
label_nodes = np.array(np.where(np.in1d(labels, label_value)), dtype=np.int32)

return label_nodes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_freesurfer_label(annot_input, verbose = True): """ Print freesurfer label names. """
labels, color_table, names = nib.freesurfer.read_annot(annot_input)

if verbose:
    print(names)

return names
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def surf_keep_cortex(surf, cortex): """ Remove medial wall from cortical surface to ensure that shortest paths are only calculated through the cortex. Inputs ------- surf : Tuple containing two numpy arrays of shape (n_nodes,3). Each node of the first array specifies the x, y, z coordinates one node of the surface mesh. Each node of the second array specifies the indices of the three nodes building one triangle of the surface mesh. (e.g. the output from nibabel.freesurfer.io.read_geometry) cortex : Array with indices of vertices included in within the cortex. (e.g. the output from nibabel.freesurfer.io.read_label) """
# split the surface into vertices and triangles
vertices, triangles = surf

# keep only the vertices within the cortex label
cortex_vertices = np.array(vertices[cortex], dtype=np.float64)

# keep only the triangles within the cortex label
cortex_triangles = triangles_keep_cortex(triangles, cortex)

return cortex_vertices, cortex_triangles
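A hypothetical usage sketch; the file names are placeholders and nibabel must be installed. The label indices are sorted because the reindexing in triangles_keep_cortex assumes ascending cortex indices:

import nibabel as nib
import numpy as np

surf = nib.freesurfer.read_geometry("lh.white")                 # placeholder path
cortex = np.sort(nib.freesurfer.read_label("lh.cortex.label"))  # placeholder path
cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)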
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def triangles_keep_cortex(triangles, cortex): """ Remove triangles with nodes not contained in the cortex label array """
# for each face/triangle, keep only those whose three nodes are all
# within the list of cortex nodes
input_shape = triangles.shape
triangle_is_in_cortex = np.all(
    np.reshape(np.in1d(triangles.ravel(), cortex), input_shape), axis=1)

cortex_triangles_old = np.array(
    triangles[triangle_is_in_cortex], dtype=np.int32)

# reassign node indices before outputting the triangles
new_index = np.digitize(cortex_triangles_old.ravel(), cortex, right=True)
cortex_triangles = np.array(
    np.arange(len(cortex))[new_index].reshape(cortex_triangles_old.shape),
    dtype=np.int32)

return cortex_triangles
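A self-contained toy demo of the np.digitize reindexing trick used above, mapping the surviving old node ids {2, 5, 7} onto the compact range {0, 1, 2}:

import numpy as np

cortex = np.array([2, 5, 7])   # old node ids kept in the cortex
old = np.array([[2, 5, 7]])    # one triangle expressed in old ids
new_index = np.digitize(old.ravel(), cortex, right=True)
print(np.arange(len(cortex))[new_index].reshape(old.shape))  # [[0 1 2]]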
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dist_calc(surf, cortex, source_nodes): """ Calculate exact geodesic distance along cortical surface from set of source nodes. "dist_type" specifies whether to calculate "min", "mean", "median", or "max" distance values from a region-of-interest. If running only on single node, defaults to "min". """
cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)
translated_source_nodes = translate_src(source_nodes, cortex)
data = gdist.compute_gdist(cortex_vertices, cortex_triangles,
                           source_indices=translated_source_nodes)
dist = recort(data, surf, cortex)
del data

return dist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def zone_calc(surf, cortex, src): """ Calculate closest nodes to each source node using exact geodesic distance along the cortical surface. """
cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)

dist_vals = np.zeros((len(src), len(cortex_vertices)))

for x in range(len(src)):
    translated_source_nodes = translate_src(src[x], cortex)
    dist_vals[x, :] = gdist.compute_gdist(cortex_vertices, cortex_triangles,
                                          source_indices=translated_source_nodes)

# 1-based index of the closest source node for each vertex
data = np.argsort(dist_vals, axis=0)[0, :] + 1
zone = recort(data, surf, cortex)
del data

return zone
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_module(filename): """ Loads a module by filename """
basename = os.path.basename(filename)
path = os.path.dirname(filename)
sys.path.append(path)
# TODO(tlan) need to figure out how to handle errors thrown here
return __import__(os.path.splitext(basename)[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_machine_mapping(machine_list): """ Convert the machine list argument from a list of names into a mapping of logical names to physical hosts. This is similar to the _parse_configs function but separated to provide the opportunity for extension and additional checking of machine access """
if machine_list is None:
    return {}

mapping = {}
for pair in machine_list:
    if (constants.MACHINE_SEPARATOR not in pair) or (pair.count(constants.MACHINE_SEPARATOR) != 1):
        raise ValueError("machine pairs must be passed as two strings separated by a %s"
                         % constants.MACHINE_SEPARATOR)
    (logical, physical) = pair.split(constants.MACHINE_SEPARATOR)
    # TODO: add checks for reachability
    mapping[logical] = physical
return mapping
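A hedged usage sketch, assuming constants.MACHINE_SEPARATOR == "=" (the real separator lives in this project's constants module):

# make_machine_mapping(["web1=host1.example.com", "db=host2.example.com"])
#   -> {"web1": "host1.example.com", "db": "host2.example.com"}
# make_machine_mapping(None) -> {}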
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_config_list(config_list): """ Parse a list of configuration properties separated by '=' """
if config_list is None:
    return {}

mapping = {}
for pair in config_list:
    if (constants.CONFIG_SEPARATOR not in pair) or (pair.count(constants.CONFIG_SEPARATOR) != 1):
        raise ValueError("configs must be passed as two strings separated by a %s"
                         % constants.CONFIG_SEPARATOR)
    (config, value) = pair.split(constants.CONFIG_SEPARATOR)
    mapping[config] = value
return mapping
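The same key=value parsing rule, reduced to a self-contained sketch with "=" assumed as the separator (the real one comes from constants.CONFIG_SEPARATOR):

def parse_pairs(pairs, sep="="):
    out = {}
    for pair in pairs or []:
        if pair.count(sep) != 1:
            raise ValueError("expected exactly one %r in %r" % (sep, pair))
        key, value = pair.split(sep)
        out[key] = value
    return out

print(parse_pairs(["retries=3", "timeout=30"]))  # {'retries': '3', 'timeout': '30'}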
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deploy(self, unique_id, configs=None): """Deploys the service to the host. This should at least perform the same actions as install and start but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a mao of configs the deployer may use to modify the deployment """
self.install(unique_id, configs)
self.start(unique_id, configs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def undeploy(self, unique_id, configs=None): """Undeploys the service. This should at least perform the same actions as stop and uninstall but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a map of configs the deployer may use """
self.stop(unique_id, configs)
self.uninstall(unique_id, configs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sleep(self, unique_id, delay, configs=None): """ Pauses the process for the specified delay and then resumes it :Parameter unique_id: the name of the process :Parameter delay: delay time in seconds """
self.pause(unique_id, configs)
time.sleep(delay)
self.resume(unique_id, configs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pause(self, unique_id, configs=None): """ Issues a sigstop for the specified process :Parameter unique_id: the name of the process """
pids = self.get_pid(unique_id, configs)
if pids != constants.PROCESS_NOT_RUNNING_PID:
    pid_str = ' '.join(str(pid) for pid in pids)
    hostname = self.processes[unique_id].hostname
    with get_ssh_client(hostname, username=runtime.get_username(),
                        password=runtime.get_password()) as ssh:
        better_exec_command(ssh, "kill -SIGSTOP {0}".format(pid_str),
                            "PAUSING PROCESS {0}".format(unique_id))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _send_signal(self, unique_id, signalno, configs): """ Issues a signal for the specified process :Parameter unique_id: the name of the process """
pids = self.get_pid(unique_id, configs)
if pids != constants.PROCESS_NOT_RUNNING_PID:
    pid_str = ' '.join(str(pid) for pid in pids)
    hostname = self.processes[unique_id].hostname
    msg = Deployer._signalnames.get(signalno, "SENDING SIGNAL %s TO" % signalno)
    with get_ssh_client(hostname, username=runtime.get_username(),
                        password=runtime.get_password()) as ssh:
        better_exec_command(ssh, "kill -{0} {1}".format(signalno, pid_str),
                            "{0} PROCESS {1}".format(msg, unique_id))