Columns: text_prompt (string, lengths 100 to 17.7k) and code_prompt (string, lengths 7 to 9.86k)
<SYSTEM_TASK:> gets the subcommands under the service name <END_TASK> <USER_TASK:> Description: def get_command(self, ctx, cmd_name): """ gets the subcommands under the service name Parameters ---------- ctx : Context the context object passed into the method cmd_name : str the service name Returns ------- EventTypeSubCommand: returns subcommand if successful, None if not. """
if cmd_name not in self.all_cmds:
    return None
return EventTypeSubCommand(self.events_lib, cmd_name, self.all_cmds[cmd_name])
<SYSTEM_TASK:> gets the Click Commands underneath a service name <END_TASK> <USER_TASK:> Description: def get_command(self, ctx, cmd_name): """ gets the Click Commands underneath a service name Parameters ---------- ctx: Context context object passed in cmd_name: string the service name Returns ------- cmd: Click.Command the Click Commands that can be called from the CLI """
if cmd_name not in self.subcmd_definition: return None parameters = [] for param_name in self.subcmd_definition[cmd_name][self.TAGS].keys(): default = self.subcmd_definition[cmd_name][self.TAGS][param_name]["default"] parameters.append(click.Option( ["--{}".format(param_name)], default=default, help="Specify the {} name you'd like, otherwise the default = {}".format(param_name, default) )) command_callback = functools.partial(self.cmd_implementation, self.events_lib, self.top_level_cmd_name, cmd_name) cmd = click.Command(name=cmd_name, short_help=self.subcmd_definition[cmd_name]["help"], params=parameters, callback=command_callback) cmd = debug_option(cmd) return cmd
<SYSTEM_TASK:> calls for value substitution in the event json and returns the <END_TASK> <USER_TASK:> Description: def cmd_implementation(self, events_lib, top_level_cmd_name, subcmd_name, *args, **kwargs): """ calls for value substitution in the event json and returns the customized json as a string Parameters ---------- events_lib top_level_cmd_name: string the name of the service subcmd_name: string the name of the event under the service args: tuple any arguments passed in before kwargs kwargs: dict the keys and values for substitution in the json Returns ------- event: string returns the customized event json as a string """
event = events_lib.generate_event(top_level_cmd_name, subcmd_name, kwargs)
click.echo(event)
return event
<SYSTEM_TASK:> Generates the lars path for weighted data. <END_TASK> <USER_TASK:> Description: def generate_lars_path(weighted_data, weighted_labels): """Generates the lars path for weighted data. Args: weighted_data: data that has been weighted by kernel weighted_label: labels, weighted by kernel Returns: (alphas, coefs), both are arrays corresponding to the regularization parameter and coefficients, respectively """
x_vector = weighted_data
alphas, _, coefs = lars_path(x_vector,
                             weighted_labels,
                             method='lasso',
                             verbose=False)
return alphas, coefs
<SYSTEM_TASK:> Iteratively adds features to the model <END_TASK> <USER_TASK:> Description: def forward_selection(self, data, labels, weights, num_features): """Iteratively adds features to the model"""
clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state) used_features = [] for _ in range(min(num_features, data.shape[1])): max_ = -100000000 best = 0 for feature in range(data.shape[1]): if feature in used_features: continue clf.fit(data[:, used_features + [feature]], labels, sample_weight=weights) score = clf.score(data[:, used_features + [feature]], labels, sample_weight=weights) if score > max_: best = feature max_ = score used_features.append(best) return np.array(used_features)
<SYSTEM_TASK:> Selects features for the model. see explain_instance_with_data to <END_TASK> <USER_TASK:> Description: def feature_selection(self, data, labels, weights, num_features, method): """Selects features for the model. see explain_instance_with_data to understand the parameters."""
if method == 'none': return np.array(range(data.shape[1])) elif method == 'forward_selection': return self.forward_selection(data, labels, weights, num_features) elif method == 'highest_weights': clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state) clf.fit(data, labels, sample_weight=weights) feature_weights = sorted(zip(range(data.shape[0]), clf.coef_ * data[0]), key=lambda x: np.abs(x[1]), reverse=True) return np.array([x[0] for x in feature_weights[:num_features]]) elif method == 'lasso_path': weighted_data = ((data - np.average(data, axis=0, weights=weights)) * np.sqrt(weights[:, np.newaxis])) weighted_labels = ((labels - np.average(labels, weights=weights)) * np.sqrt(weights)) nonzero = range(weighted_data.shape[1]) _, coefs = self.generate_lars_path(weighted_data, weighted_labels) for i in range(len(coefs.T) - 1, 0, -1): nonzero = coefs.T[i].nonzero()[0] if len(nonzero) <= num_features: break used_features = nonzero return used_features elif method == 'auto': if num_features <= 6: n_method = 'forward_selection' else: n_method = 'highest_weights' return self.feature_selection(data, labels, weights, num_features, n_method)
<SYSTEM_TASK:> Takes perturbed data, labels and distances, returns explanation. <END_TASK> <USER_TASK:> Description: def explain_instance_with_data(self, neighborhood_data, neighborhood_labels, distances, label, num_features, feature_selection='auto', model_regressor=None): """Takes perturbed data, labels and distances, returns explanation. Args: neighborhood_data: perturbed data, 2d array. first element is assumed to be the original data point. neighborhood_labels: corresponding perturbed labels. should have as many columns as the number of possible labels. distances: distances to original data point. label: label for which we want an explanation num_features: maximum number of features in explanation feature_selection: how to select num_features. options are: 'forward_selection': iteratively add features to the model. This is costly when num_features is high 'highest_weights': selects the features that have the highest product of absolute weight * original data point when learning with all the features 'lasso_path': chooses features based on the lasso regularization path 'none': uses all features, ignores num_features 'auto': uses forward_selection if num_features <= 6, and 'highest_weights' otherwise. model_regressor: sklearn regressor to use in explanation. Defaults to Ridge regression if None. Must have model_regressor.coef_ and 'sample_weight' as a parameter to model_regressor.fit() Returns: (intercept, exp, score, local_pred): intercept is a float. exp is a sorted list of tuples, where each tuple (x,y) corresponds to the feature id (x) and the local weight (y). The list is sorted by decreasing absolute value of y. score is the R^2 value of the returned explanation local_pred is the prediction of the explanation model on the original instance """
weights = self.kernel_fn(distances) labels_column = neighborhood_labels[:, label] used_features = self.feature_selection(neighborhood_data, labels_column, weights, num_features, feature_selection) if model_regressor is None: model_regressor = Ridge(alpha=1, fit_intercept=True, random_state=self.random_state) easy_model = model_regressor easy_model.fit(neighborhood_data[:, used_features], labels_column, sample_weight=weights) prediction_score = easy_model.score( neighborhood_data[:, used_features], labels_column, sample_weight=weights) local_pred = easy_model.predict(neighborhood_data[0, used_features].reshape(1, -1)) if self.verbose: print('Intercept', easy_model.intercept_) print('Prediction_local', local_pred,) print('Right:', neighborhood_labels[0, label]) return (easy_model.intercept_, sorted(zip(used_features, easy_model.coef_), key=lambda x: np.abs(x[1]), reverse=True), prediction_score, local_pred)
<SYSTEM_TASK:> Helper function to generate random div ids. This is useful for embedding <END_TASK> <USER_TASK:> Description: def id_generator(size=15, random_state=None): """Helper function to generate random div ids. This is useful for embedding HTML into ipython notebooks."""
chars = list(string.ascii_uppercase + string.digits)
return ''.join(random_state.choice(chars, size, replace=True))
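A short usage sketch for id_generator, assuming the function above is in scope and that random_state is a numpy RandomState (the body relies on its choice(..., replace=True) signature); the seed is only illustrative.

import numpy as np

rs = np.random.RandomState(42)                      # any RandomState works
div_id = id_generator(size=15, random_state=rs)
print(div_id)                                       # 15 characters drawn from A-Z and 0-9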
<SYSTEM_TASK:> Returns the list of classification labels for which we have any explanations. <END_TASK> <USER_TASK:> Description: def available_labels(self): """ Returns the list of classification labels for which we have any explanations. """
try:
    assert self.mode == "classification"
except AssertionError:
    raise NotImplementedError('Not supported for regression explanations.')
else:
    ans = self.top_labels if self.top_labels else self.local_exp.keys()
    return list(ans)
<SYSTEM_TASK:> Returns the explanation as a list. <END_TASK> <USER_TASK:> Description: def as_list(self, label=1, **kwargs): """Returns the explanation as a list. Args: label: desired label. If you ask for a label for which an explanation wasn't computed, will throw an exception. Will be ignored for regression explanations. kwargs: keyword arguments, passed to domain_mapper Returns: list of tuples (representation, weight), where representation is given by domain_mapper. Weight is a float. """
label_to_use = label if self.mode == "classification" else self.dummy_label
ans = self.domain_mapper.map_exp_ids(self.local_exp[label_to_use], **kwargs)
ans = [(x[0], float(x[1])) for x in ans]
return ans
<SYSTEM_TASK:> Returns the explanation as a pyplot figure. <END_TASK> <USER_TASK:> Description: def as_pyplot_figure(self, label=1, **kwargs): """Returns the explanation as a pyplot figure. Will throw an error if you don't have matplotlib installed Args: label: desired label. If you ask for a label for which an explanation wasn't computed, will throw an exception. Will be ignored for regression explanations. kwargs: keyword arguments, passed to domain_mapper Returns: pyplot figure (barchart). """
import matplotlib.pyplot as plt exp = self.as_list(label=label, **kwargs) fig = plt.figure() vals = [x[1] for x in exp] names = [x[0] for x in exp] vals.reverse() names.reverse() colors = ['green' if x > 0 else 'red' for x in vals] pos = np.arange(len(exp)) + .5 plt.barh(pos, vals, align='center', color=colors) plt.yticks(pos, names) if self.mode == "classification": title = 'Local explanation for class %s' % self.class_names[label] else: title = 'Local explanation' plt.title(title) return fig
<SYSTEM_TASK:> Shows html explanation in ipython notebook. <END_TASK> <USER_TASK:> Description: def show_in_notebook(self, labels=None, predict_proba=True, show_predicted_value=True, **kwargs): """Shows html explanation in ipython notebook. See as_html() for parameters. This will throw an error if you don't have IPython installed"""
from IPython.core.display import display, HTML display(HTML(self.as_html(labels=labels, predict_proba=predict_proba, show_predicted_value=show_predicted_value, **kwargs)))
<SYSTEM_TASK:> Saves html explanation to file. <END_TASK> <USER_TASK:> Description: def save_to_file(self, file_path, labels=None, predict_proba=True, show_predicted_value=True, **kwargs): """Saves html explanation to file. Params: file_path: file to save explanations to See as_html() for additional parameters. """
file_ = open(file_path, 'w', encoding='utf8')
file_.write(self.as_html(labels=labels,
                         predict_proba=predict_proba,
                         show_predicted_value=show_predicted_value,
                         **kwargs))
file_.close()
<SYSTEM_TASK:> Checks for mistakes in 'parameters' <END_TASK> <USER_TASK:> Description: def _check_params(self, parameters): """Checks for mistakes in 'parameters' Args : parameters: dict, parameters to be checked Raises : ValueError: if any parameter is not a valid argument for the target function or the target function is not defined TypeError: if argument parameters is not iterable """
a_valid_fn = []
if self.target_fn is None:
    if callable(self):
        a_valid_fn.append(self.__call__)
    else:
        raise TypeError('invalid argument: tested object is not callable, '
                        'please provide a valid target_fn')
elif isinstance(self.target_fn, types.FunctionType) \
        or isinstance(self.target_fn, types.MethodType):
    a_valid_fn.append(self.target_fn)
else:
    a_valid_fn.append(self.target_fn.__call__)

if not isinstance(parameters, str):
    for p in parameters:
        for fn in a_valid_fn:
            if has_arg(fn, p):
                pass
            else:
                raise ValueError('{} is not a valid parameter'.format(p))
else:
    raise TypeError('invalid argument: list or dictionary expected')
<SYSTEM_TASK:> Maps ids to words or word-position strings. <END_TASK> <USER_TASK:> Description: def map_exp_ids(self, exp, positions=False): """Maps ids to words or word-position strings. Args: exp: list of tuples [(id, weight), (id,weight)] positions: if True, also return word positions Returns: list of tuples (word, weight), or (word_positions, weight) if examples: ('bad', 1) or ('bad_3-6-12', 1) """
if positions:
    exp = [('%s_%s' % (
        self.indexed_string.word(x[0]),
        '-'.join(map(str, self.indexed_string.string_position(x[0])))),
        x[1]) for x in exp]
else:
    exp = [(self.indexed_string.word(x[0]), x[1]) for x in exp]
return exp
<SYSTEM_TASK:> Returns a string after removing the appropriate words. <END_TASK> <USER_TASK:> Description: def inverse_removing(self, words_to_remove): """Returns a string after removing the appropriate words. If self.bow is false, replaces word with UNKWORDZ instead of removing it. Args: words_to_remove: list of ids (ints) to remove Returns: original raw string with appropriate words removed. """
mask = np.ones(self.as_np.shape[0], dtype='bool')
mask[self.__get_idxs(words_to_remove)] = False
if not self.bow:
    return ''.join([self.as_list[i] if mask[i] else 'UNKWORDZ'
                    for i in range(mask.shape[0])])
return ''.join([self.as_list[v] for v in mask.nonzero()[0]])
<SYSTEM_TASK:> Segment a string around the tokens created by a passed-in tokenizer <END_TASK> <USER_TASK:> Description: def _segment_with_tokens(text, tokens): """Segment a string around the tokens created by a passed-in tokenizer"""
list_form = []
text_ptr = 0
for token in tokens:
    inter_token_string = []
    while not text[text_ptr:].startswith(token):
        inter_token_string.append(text[text_ptr])
        text_ptr += 1
        if text_ptr >= len(text):
            raise ValueError("Tokenization produced tokens that do not belong in string!")
    text_ptr += len(token)
    if inter_token_string:
        list_form.append(''.join(inter_token_string))
    list_form.append(token)
if text_ptr < len(text):
    list_form.append(text[text_ptr:])
return list_form
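A minimal sketch of how _segment_with_tokens behaves, assuming the function above is in scope; the token list here is hand-made for illustration rather than the output of any particular tokenizer.

text = "Hello, world"
tokens = ["Hello", "world"]
print(_segment_with_tokens(text, tokens))
# ['Hello', ', ', 'world']  -- inter-token text is kept as its own segment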
<SYSTEM_TASK:> Returns indexes to appropriate words. <END_TASK> <USER_TASK:> Description: def __get_idxs(self, words): """Returns indexes to appropriate words."""
if self.bow:
    return list(itertools.chain.from_iterable(
        [self.positions[z] for z in words]))
else:
    return self.positions[words]
<SYSTEM_TASK:> Maps ids to feature names. <END_TASK> <USER_TASK:> Description: def map_exp_ids(self, exp): """Maps ids to feature names. Args: exp: list of tuples [(id, weight), (id,weight)] Returns: list of tuples (feature_name, weight) """
names = self.exp_feature_names
if self.discretized_feature_names is not None:
    names = self.discretized_feature_names
return [(names[x[0]], x[1]) for x in exp]
<SYSTEM_TASK:> Shows the current example in a table format. <END_TASK> <USER_TASK:> Description: def visualize_instance_html(self, exp, label, div_name, exp_object_name, show_table=True, show_all=False): """Shows the current example in a table format. Args: exp: list of tuples [(id, weight), (id,weight)] label: label id (integer) div_name: name of div object to be used for rendering(in js) exp_object_name: name of js explanation object show_table: if False, don't show table visualization. show_all: if True, show zero-weighted features in the table. """
if not show_table: return '' weights = [0] * len(self.feature_names) for x in exp: weights[x[0]] = x[1] out_list = list(zip(self.exp_feature_names, self.feature_values, weights)) if not show_all: out_list = [out_list[x[0]] for x in exp] ret = u''' %s.show_raw_tabular(%s, %d, %s); ''' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name) return ret
<SYSTEM_TASK:> Method to validate the structure of training data stats <END_TASK> <USER_TASK:> Description: def validate_training_data_stats(training_data_stats): """ Method to validate the structure of training data stats """
stat_keys = list(training_data_stats.keys())
valid_stat_keys = ["means", "mins", "maxs", "stds",
                   "feature_values", "feature_frequencies"]
missing_keys = list(set(valid_stat_keys) - set(stat_keys))
if len(missing_keys) > 0:
    # NOTE: the original message had no %s placeholder, so the "%" formatting
    # raised a TypeError instead of reporting which keys were missing.
    raise Exception(
        "Missing keys in training_data_stats. Details: %s" % (missing_keys,))
<SYSTEM_TASK:> The predict_proba method will expect 3d arrays, but we are reshaping <END_TASK> <USER_TASK:> Description: def _make_predict_proba(self, func): """ The predict_proba method will expect 3d arrays, but we are reshaping them to 2D so that LIME works correctly. This wraps the function you give in explain_instance to first reshape the data to have the shape that the keras-style network expects. """
def predict_proba(X):
    n_samples = X.shape[0]
    new_shape = (n_samples, self.n_features, self.n_timesteps)
    X = np.transpose(X.reshape(new_shape), axes=(0, 2, 1))
    return func(X)

return predict_proba
<SYSTEM_TASK:> Returns the image and mask for the explanation of a given label. <END_TASK> <USER_TASK:> Description: def get_image_and_mask(self, label, positive_only=True, hide_rest=False, num_features=5, min_weight=0.): """Returns the image and mask for the explanation of a given label. Args: label: label to explain positive_only: if True, only take superpixels that contribute to the prediction of the label. Otherwise, use the top num_features superpixels, which can be positive or negative towards the label hide_rest: if True, make the non-explanation part of the return image gray num_features: number of superpixels to include in explanation min_weight: minimum weight a superpixel must have to be included in the explanation Returns: (image, mask), where image is a 3d numpy array and mask is a 2d numpy array that can be used with skimage.segmentation.mark_boundaries """
if label not in self.local_exp: raise KeyError('Label not in explanation') segments = self.segments image = self.image exp = self.local_exp[label] mask = np.zeros(segments.shape, segments.dtype) if hide_rest: temp = np.zeros(self.image.shape) else: temp = self.image.copy() if positive_only: fs = [x[0] for x in exp if x[1] > 0 and x[1] > min_weight][:num_features] for f in fs: temp[segments == f] = image[segments == f].copy() mask[segments == f] = 1 return temp, mask else: for f, w in exp[:num_features]: if np.abs(w) < min_weight: continue c = 0 if w < 0 else 1 mask[segments == f] = 1 if w < 0 else 2 temp[segments == f] = image[segments == f].copy() temp[segments == f, c] = np.max(image) for cp in [0, 1, 2]: if c == cp: continue # temp[segments == f, cp] *= 0.5 return temp, mask
<SYSTEM_TASK:> Generates images and predictions in the neighborhood of this image. <END_TASK> <USER_TASK:> Description: def data_labels(self, image, fudged_image, segments, classifier_fn, num_samples, batch_size=10): """Generates images and predictions in the neighborhood of this image. Args: image: 3d numpy array, the image fudged_image: 3d numpy array, image to replace original image when superpixel is turned off segments: segmentation of the image classifier_fn: function that takes a list of images and returns a matrix of prediction probabilities num_samples: size of the neighborhood to learn the linear model batch_size: classifier_fn will be called on batches of this size. Returns: A tuple (data, labels), where: data: dense num_samples * num_superpixels labels: prediction probabilities matrix """
n_features = np.unique(segments).shape[0] data = self.random_state.randint(0, 2, num_samples * n_features)\ .reshape((num_samples, n_features)) labels = [] data[0, :] = 1 imgs = [] for row in data: temp = copy.deepcopy(image) zeros = np.where(row == 0)[0] mask = np.zeros(segments.shape).astype(bool) for z in zeros: mask[segments == z] = True temp[mask] = fudged_image[mask] imgs.append(temp) if len(imgs) == batch_size: preds = classifier_fn(np.array(imgs)) labels.extend(preds) imgs = [] if len(imgs) > 0: preds = classifier_fn(np.array(imgs)) labels.extend(preds) return data, np.array(labels)
<SYSTEM_TASK:> Checks if a callable accepts a given keyword argument. <END_TASK> <USER_TASK:> Description: def has_arg(fn, arg_name): """Checks if a callable accepts a given keyword argument. Args: fn: callable to inspect arg_name: string, keyword argument name to check Returns: bool, whether `fn` accepts a `arg_name` keyword argument. """
if sys.version_info < (3,): if isinstance(fn, types.FunctionType) or isinstance(fn, types.MethodType): arg_spec = inspect.getargspec(fn) else: try: arg_spec = inspect.getargspec(fn.__call__) except AttributeError: return False return (arg_name in arg_spec.args) elif sys.version_info < (3, 6): arg_spec = inspect.getfullargspec(fn) return (arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs) else: try: signature = inspect.signature(fn) except ValueError: # handling Cython signature = inspect.signature(fn.__call__) parameter = signature.parameters.get(arg_name) if parameter is None: return False return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY))
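A hedged usage sketch for has_arg, assuming the function above is in scope (or importable from its utility module) and Python 3 is used; the two sample functions are made up purely for illustration.

def fit(X, y, sample_weight=None):
    return None

def predict(X):
    return None

print(has_arg(fit, "sample_weight"))      # True  -- keyword argument is accepted
print(has_arg(predict, "sample_weight"))  # False -- no such parameter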
<SYSTEM_TASK:> The config file is stored in a way that allows you to have a <END_TASK> <USER_TASK:> Description: def _get_remote(self, config, name): """ The config file is stored in a way that allows you to have a cache for each remote. This is needed when specifying external outputs (as they require you to have an external cache location). Imagine a config file like the following: ['remote "dvc-storage"'] url = ssh://localhost/tmp ask_password = true [cache] ssh = dvc-storage This method resolves the name under the cache section into the correct Remote instance. Args: config (dict): The cache section on the config file name (str): Name of the section we are interested in to retrieve Returns: remote (dvc.Remote): Remote instance that the section is referring. None when there's no remote with that name. Example: >>> _get_remote(config={'ssh': 'dvc-storage'}, name='ssh') """
from dvc.remote import Remote

remote = config.get(name)
if not remote:
    return None

settings = self.repo.config.get_remote_settings(remote)
return Remote(self.repo, settings)
<SYSTEM_TASK:> Create a point on ASCII canvas. <END_TASK> <USER_TASK:> Description: def point(self, x, y, char): """Create a point on ASCII canvas. Args: x (int): x coordinate. Should be >= 0 and < number of columns in the canvas. y (int): y coordinate. Should be >= 0 an < number of lines in the canvas. char (str): character to place in the specified point on the canvas. """
assert len(char) == 1
assert x >= 0
assert x < self.cols
assert y >= 0
assert y < self.lines

self.canvas[y][x] = char
<SYSTEM_TASK:> Create a line on ASCII canvas. <END_TASK> <USER_TASK:> Description: def line(self, x0, y0, x1, y1, char): """Create a line on ASCII canvas. Args: x0 (int): x coordinate where the line should start. y0 (int): y coordinate where the line should start. x1 (int): x coordinate where the line should end. y1 (int): y coordinate where the line should end. char (str): character to draw the line with. """
# pylint: disable=too-many-arguments, too-many-branches if x0 > x1: x1, x0 = x0, x1 y1, y0 = y0, y1 dx = x1 - x0 dy = y1 - y0 if dx == 0 and dy == 0: self.point(x0, y0, char) elif abs(dx) >= abs(dy): for x in range(x0, x1 + 1): if dx == 0: y = y0 else: y = y0 + int(round((x - x0) * dy / float((dx)))) self.point(x, y, char) elif y0 < y1: for y in range(y0, y1 + 1): if dy == 0: x = x0 else: x = x0 + int(round((y - y0) * dx / float((dy)))) self.point(x, y, char) else: for y in range(y1, y0 + 1): if dy == 0: x = x0 else: x = x1 + int(round((y - y1) * dx / float((dy)))) self.point(x, y, char)
<SYSTEM_TASK:> Print a text on ASCII canvas. <END_TASK> <USER_TASK:> Description: def text(self, x, y, text): """Print a text on ASCII canvas. Args: x (int): x coordinate where the text should start. y (int): y coordinate where the text should start. text (str): string that should be printed. """
for i, char in enumerate(text):
    self.point(x + i, y, char)
<SYSTEM_TASK:> Create a box on ASCII canvas. <END_TASK> <USER_TASK:> Description: def box(self, x0, y0, width, height): """Create a box on ASCII canvas. Args: x0 (int): x coordinate of the box corner. y0 (int): y coordinate of the box corner. width (int): box width. height (int): box height. """
assert width > 1 assert height > 1 width -= 1 height -= 1 for x in range(x0, x0 + width): self.point(x, y0, "-") self.point(x, y0 + height, "-") for y in range(y0, y0 + height): self.point(x0, y, "|") self.point(x0 + width, y, "|") self.point(x0, y0, "+") self.point(x0 + width, y0, "+") self.point(x0, y0 + height, "+") self.point(x0 + width, y0 + height, "+")
<SYSTEM_TASK:> Refreshes progress bar. <END_TASK> <USER_TASK:> Description: def refresh(self, line=None): """Refreshes progress bar."""
# Just go away if it is locked. Will update next time
if not self._lock.acquire(False):
    return

if line is None:
    line = self._line

if sys.stdout.isatty() and line is not None:
    self._writeln(line)
    self._line = line

self._lock.release()
<SYSTEM_TASK:> Updates progress bar for a specified target. <END_TASK> <USER_TASK:> Description: def update_target(self, name, current, total): """Updates progress bar for a specified target."""
self.refresh(self._bar(name, current, total))
<SYSTEM_TASK:> Finishes progress bar for a specified target. <END_TASK> <USER_TASK:> Description: def finish_target(self, name): """Finishes progress bar for a specified target."""
# We have to write a msg about finished target with self._lock: pbar = self._bar(name, 100, 100) if sys.stdout.isatty(): self.clearln() self._print(pbar) self._n_finished += 1 self._line = None
<SYSTEM_TASK:> Generates diff message string output <END_TASK> <USER_TASK:> Description: def diff(self, a_ref, target=None, b_ref=None): """Generates diff message string output Args: target(str) - file/directory to check diff of (optional) a_ref(str) - first git tag b_ref(str) - second git tag (optional) Returns: dict: dictionary with the diff details """
result = {} diff_dct = self.scm.get_diff_trees(a_ref, b_ref=b_ref) result[DIFF_A_REF] = diff_dct[DIFF_A_REF] result[DIFF_B_REF] = diff_dct[DIFF_B_REF] if diff_dct[DIFF_EQUAL]: result[DIFF_EQUAL] = True return result result[DIFF_LIST] = [] diff_outs = _get_diff_outs(self, diff_dct) if target is None: result[DIFF_LIST] = [ _diff_royal(self, path, diff_outs[path]) for path in diff_outs ] elif target in diff_outs: result[DIFF_LIST] = [_diff_royal(self, target, diff_outs[target])] else: msg = "Have not found file/directory '{}' in the commits" raise FileNotInCommitError(msg.format(target)) return result
<SYSTEM_TASK:> r"""Derive the evaluation of the given node for the given graph. <END_TASK> <USER_TASK:> Description: def _reproduce_stages( G, stages, node, force, dry, interactive, ignore_build_cache, no_commit, downstream, ): r"""Derive the evaluation of the given node for the given graph. When you _reproduce a stage_, you want to _evaluate the descendants_ to know if it make sense to _recompute_ it. A post-ordered search will give us an order list of the nodes we want. For example, let's say that we have the following pipeline: E / \ D F / \ \ B C G \ / A The derived evaluation of D would be: [A, B, C, D] In case that `downstream` option is specifed, the desired effect is to derive the evaluation starting from the given stage up to the ancestors. However, the `networkx.ancestors` returns a set, without any guarantee of any order, so we are going to reverse the graph and use a pre-ordered search using the given stage as a starting point. E A / \ / \ D F B C G / \ \ --- reverse --> \ / / B C G D F \ / \ / A E The derived evaluation of _downstream_ B would be: [B, D, E] """
import networkx as nx if downstream: # NOTE (py3 only): # Python's `deepcopy` defaults to pickle/unpickle the object. # Stages are complex objects (with references to `repo`, `outs`, # and `deps`) that cause struggles when you try to serialize them. # We need to create a copy of the graph itself, and then reverse it, # instead of using graph.reverse() directly because it calls # `deepcopy` underneath -- unless copy=False is specified. pipeline = nx.dfs_preorder_nodes(G.copy().reverse(copy=False), node) else: pipeline = nx.dfs_postorder_nodes(G, node) result = [] for n in pipeline: try: ret = _reproduce_stage( stages, n, force, dry, interactive, no_commit ) if len(ret) != 0 and ignore_build_cache: # NOTE: we are walking our pipeline from the top to the # bottom. If one stage is changed, it will be reproduced, # which tells us that we should force reproducing all of # the other stages down below, even if their direct # dependencies didn't change. force = True result += ret except Exception as ex: raise ReproductionError(stages[n].relpath, ex) return result
<SYSTEM_TASK:> csv.reader doesn't support Unicode input, so need to use some tricks <END_TASK> <USER_TASK:> Description: def csv_reader(unicode_csv_data, dialect=None, **kwargs): """csv.reader doesn't support Unicode input, so need to use some tricks to work around this. Source: https://docs.python.org/2/library/csv.html#csv-examples """
import csv dialect = dialect or csv.excel if is_py3: # Python3 supports encoding by default, so just return the object for row in csv.reader(unicode_csv_data, dialect=dialect, **kwargs): yield [cell for cell in row] else: # csv.py doesn't do Unicode; encode temporarily as UTF-8: reader = csv.reader( utf_8_encoder(unicode_csv_data), dialect=dialect, **kwargs ) for row in reader: # decode UTF-8 back to Unicode, cell by cell: yield [unicode(cell, "utf-8") for cell in row]
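A small sketch of csv_reader in use, assuming Python 3 (where the function simply delegates to csv.reader) and that the function above plus its compat helpers are in scope.

from io import StringIO

rows = list(csv_reader(StringIO("name,value\nfoo,1\nbar,2\n"), delimiter=","))
print(rows)   # [['name', 'value'], ['foo', '1'], ['bar', '2']]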
<SYSTEM_TASK:> Whether the stage file was created with `dvc import`. <END_TASK> <USER_TASK:> Description: def is_import(self): """Whether the stage file was created with `dvc import`."""
return not self.cmd and len(self.deps) == 1 and len(self.outs) == 1
<SYSTEM_TASK:> Checks if this stage has already been run and stored <END_TASK> <USER_TASK:> Description: def is_cached(self): """ Checks if this stage has already been run and stored """
from dvc.remote.local import RemoteLOCAL from dvc.remote.s3 import RemoteS3 old = Stage.load(self.repo, self.path) if old._changed_outs(): return False # NOTE: need to save checksums for deps in order to compare them # with what is written in the old stage. for dep in self.deps: dep.save() old_d = old.dumpd() new_d = self.dumpd() # NOTE: need to remove checksums from old dict in order to compare # it to the new one, since the new one doesn't have checksums yet. old_d.pop(self.PARAM_MD5, None) new_d.pop(self.PARAM_MD5, None) outs = old_d.get(self.PARAM_OUTS, []) for out in outs: out.pop(RemoteLOCAL.PARAM_CHECKSUM, None) out.pop(RemoteS3.PARAM_CHECKSUM, None) if old_d != new_d: return False # NOTE: committing to prevent potential data duplication. For example # # $ dvc config cache.type hardlink # $ echo foo > foo # $ dvc add foo # $ rm -f foo # $ echo foo > foo # $ dvc add foo # should replace foo with a link to cache # old.commit() return True
<SYSTEM_TASK:> Launch a `dvc daemon` command in a detached process. <END_TASK> <USER_TASK:> Description: def daemon(args): """Launch a `dvc daemon` command in a detached process. Args: args (list): list of arguments to append to `dvc daemon` command. """
if os.environ.get(DVC_DAEMON): logger.debug("skipping launching a new daemon.") return cmd = [sys.executable] if not is_binary(): cmd += ["-m", "dvc"] cmd += ["daemon", "-q"] + args env = fix_env() file_path = os.path.abspath(inspect.stack()[0][1]) env[cast_bytes_py2("PYTHONPATH")] = cast_bytes_py2( os.path.dirname(os.path.dirname(file_path)) ) env[cast_bytes_py2(DVC_DAEMON)] = cast_bytes_py2("1") _spawn(cmd, env)
<SYSTEM_TASK:> Format delimited text to have same column width. <END_TASK> <USER_TASK:> Description: def _format_csv(content, delimiter): """Format delimited text to have same column width. Args: content (str): The content of a metric. delimiter (str): Value separator Returns: str: Formatted content. Example: >>> content = ( "value_mse,deviation_mse,data_set\n" "0.421601,0.173461,train\n" "0.67528,0.289545,testing\n" "0.671502,0.297848,validation\n" ) >>> _format_csv(content, ",") "value_mse deviation_mse data_set\n" "0.421601 0.173461 train\n" "0.67528 0.289545 testing\n" "0.671502 0.297848 validation\n" """
reader = csv_reader(StringIO(content), delimiter=builtin_str(delimiter)) rows = [row for row in reader] max_widths = [max(map(len, column)) for column in zip(*rows)] lines = [ " ".join( "{entry:{width}}".format(entry=entry, width=width + 2) for entry, width in zip(row, max_widths) ) for row in rows ] return "\n".join(lines)
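A usage sketch for _format_csv, reusing the example content from the docstring; it assumes the function above and its csv_reader/StringIO dependencies are available in scope.

content = (
    "value_mse,deviation_mse,data_set\n"
    "0.421601,0.173461,train\n"
    "0.67528,0.289545,testing\n"
    "0.671502,0.297848,validation\n"
)
print(_format_csv(content, ","))
# prints the same rows, with every column padded to a common width plus two spaces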
<SYSTEM_TASK:> Tabularize the content according to its type. <END_TASK> <USER_TASK:> Description: def _format_output(content, typ): """Tabularize the content according to its type. Args: content (str): The content of a metric. typ (str): The type of metric -- (raw|json|tsv|htsv|csv|hcsv). Returns: str: Content in a raw or tabular format. """
if "csv" in str(typ): return _format_csv(content, delimiter=",") if "tsv" in str(typ): return _format_csv(content, delimiter="\t") return content
<SYSTEM_TASK:> Gather all the metric outputs. <END_TASK> <USER_TASK:> Description: def _collect_metrics(repo, path, recursive, typ, xpath, branch): """Gather all the metric outputs. Args: path (str): Path to a metric file or a directory. recursive (bool): If path is a directory, do a recursive search for metrics on the given path. typ (str): The type of metric to search for, could be one of the following (raw|json|tsv|htsv|csv|hcsv). xpath (str): Path to search for. branch (str): Branch to look up for metrics. Returns: list(tuple): (output, typ, xpath) - output: - typ: - xpath: """
outs = [out for stage in repo.stages() for out in stage.outs] if path: try: outs = repo.find_outs_by_path(path, outs=outs, recursive=recursive) except OutputNotFoundError: logger.debug( "stage file not for found for '{}' in branch '{}'".format( path, branch ) ) return [] res = [] for o in outs: if not o.metric: continue if not typ and isinstance(o.metric, dict): t = o.metric.get(o.PARAM_METRIC_TYPE, typ) x = o.metric.get(o.PARAM_METRIC_XPATH, xpath) else: t = typ x = xpath res.append((o, t, x)) return res
<SYSTEM_TASK:> Read the content of each metric file and format it. <END_TASK> <USER_TASK:> Description: def _read_metrics(repo, metrics, branch): """Read the content of each metric file and format it. Args: metrics (list): List of metric touples branch (str): Branch to look up for metrics. Returns: A dict mapping keys with metrics path name and content. For example: {'metric.csv': ("value_mse deviation_mse data_set\n" "0.421601 0.173461 train\n" "0.67528 0.289545 testing\n" "0.671502 0.297848 validation\n")} """
res = {} for out, typ, xpath in metrics: assert out.scheme == "local" if not typ: typ = os.path.splitext(out.path.lower())[1].replace(".", "") if out.use_cache: open_fun = open path = repo.cache.local.get(out.checksum) else: open_fun = repo.tree.open path = out.path try: with open_fun(path) as fd: metric = _read_metric( fd, typ=typ, xpath=xpath, rel_path=out.rel_path, branch=branch, ) except IOError as e: if e.errno == errno.ENOENT: logger.warning( NO_METRICS_FILE_AT_REFERENCE_WARNING.format( out.rel_path, branch ) ) metric = None else: raise if not metric: continue res[out.rel_path] = metric return res
<SYSTEM_TASK:> Generate a graph by using the given stages on the given directory <END_TASK> <USER_TASK:> Description: def graph(self, stages=None, from_directory=None): """Generate a graph by using the given stages on the given directory The nodes of the graph are the stage's path relative to the root. Edges are created when the output of one stage is used as a dependency in other stage. The direction of the edges goes from the stage to its dependency: For example, running the following: $ dvc run -o A "echo A > A" $ dvc run -d A -o B "echo B > B" $ dvc run -d B -o C "echo C > C" Will create the following graph: ancestors <-- | C.dvc -> B.dvc -> A.dvc | | | --> descendants | ------- pipeline ------> | v (weakly connected components) Args: stages (list): used to build a graph, if None given, use the ones on the `from_directory`. from_directory (str): directory where to look at for stages, if None is given, use the current working directory Raises: OutputDuplicationError: two outputs with the same path StagePathAsOutputError: stage inside an output directory OverlappingOutputPathsError: output inside output directory CyclicGraphError: resulting graph has cycles """
import networkx as nx from dvc.exceptions import ( OutputDuplicationError, StagePathAsOutputError, OverlappingOutputPathsError, ) G = nx.DiGraph() G_active = nx.DiGraph() stages = stages or self.stages(from_directory, check_dag=False) stages = [stage for stage in stages if stage] outs = [] for stage in stages: for out in stage.outs: existing = [] for o in outs: if o.path == out.path: existing.append(o.stage) in_o_dir = out.path.startswith(o.path + o.sep) in_out_dir = o.path.startswith(out.path + out.sep) if in_o_dir or in_out_dir: raise OverlappingOutputPathsError(o, out) if existing: stages = [stage.relpath, existing[0].relpath] raise OutputDuplicationError(out.path, stages) outs.append(out) for stage in stages: path_dir = os.path.dirname(stage.path) + os.sep for out in outs: if path_dir.startswith(out.path + os.sep): raise StagePathAsOutputError(stage.wdir, stage.relpath) for stage in stages: node = os.path.relpath(stage.path, self.root_dir) G.add_node(node, stage=stage) G_active.add_node(node, stage=stage) for dep in stage.deps: for out in outs: if ( out.path != dep.path and not dep.path.startswith(out.path + out.sep) and not out.path.startswith(dep.path + dep.sep) ): continue dep_stage = out.stage dep_node = os.path.relpath(dep_stage.path, self.root_dir) G.add_node(dep_node, stage=dep_stage) G.add_edge(node, dep_node) if not stage.locked: G_active.add_node(dep_node, stage=dep_stage) G_active.add_edge(node, dep_node) self._check_cyclic_graph(G) return G, G_active
<SYSTEM_TASK:> Add a new line if progress bar hasn't finished <END_TASK> <USER_TASK:> Description: def _progress_aware(self): """Add a new line if progress bar hasn't finished"""
from dvc.progress import progress if not progress.is_finished: progress._print() progress.clearln()
<SYSTEM_TASK:> Read config for list object api, paginate through list objects. <END_TASK> <USER_TASK:> Description: def _list_paths(self, bucket, prefix): """ Read config for list object api, paginate through list objects."""
s3 = self.s3 kwargs = {"Bucket": bucket, "Prefix": prefix} if self.list_objects: list_objects_api = "list_objects" else: list_objects_api = "list_objects_v2" paginator = s3.get_paginator(list_objects_api) for page in paginator.paginate(**kwargs): contents = page.get("Contents", None) if not contents: continue for item in contents: yield item["Key"]
<SYSTEM_TASK:> Resolve path relative to config file location. <END_TASK> <USER_TASK:> Description: def resolve_path(path, config_file): """Resolve path relative to config file location. Args: path: Path to be resolved. config_file: Path to config file, which `path` is specified relative to. Returns: Path relative to the `config_file` location. If `path` is an absolute path then it will be returned without change. """
if os.path.isabs(path):
    return path
return os.path.relpath(path, os.path.dirname(config_file))
<SYSTEM_TASK:> Checks if data has changed. <END_TASK> <USER_TASK:> Description: def changed(self, path_info, checksum_info): """Checks if data has changed. A file is considered changed if: - It doesn't exist on the working directory (was unlinked) - Checksum is not computed (saving a new file) - The checksum stored in the State is different from the given one - There's no file in the cache Args: path_info: dict with path information. checksum: expected checksum for this data. Returns: bool: True if data has changed, False otherwise. """
logger.debug( "checking if '{}'('{}') has changed.".format( path_info, checksum_info ) ) if not self.exists(path_info): logger.debug("'{}' doesn't exist.".format(path_info)) return True checksum = checksum_info.get(self.PARAM_CHECKSUM) if checksum is None: logger.debug("checksum for '{}' is missing.".format(path_info)) return True if self.changed_cache(checksum): logger.debug( "cache for '{}'('{}') has changed.".format(path_info, checksum) ) return True actual = self.save_info(path_info)[self.PARAM_CHECKSUM] if checksum != actual: logger.debug( "checksum '{}'(actual '{}') for '{}' has changed.".format( checksum, actual, path_info ) ) return True logger.debug("'{}' hasn't changed.".format(path_info)) return False
<SYSTEM_TASK:> Ask the user for confirmation about the specified statement. <END_TASK> <USER_TASK:> Description: def confirm(statement): """Ask the user for confirmation about the specified statement. Args: statement (unicode): statement to ask the user confirmation about. Returns: bool: whether or not specified statement was confirmed. """
prompt = "{statement} [y/n]".format(statement=statement) answer = _ask(prompt, limited_to=["yes", "no", "y", "n"]) return answer and answer.startswith("y")
<SYSTEM_TASK:> Run dvc CLI command. <END_TASK> <USER_TASK:> Description: def main(argv=None): """Run dvc CLI command. Args: argv: optional list of arguments to parse. sys.argv is used by default. Returns: int: command's return code. """
args = None cmd = None try: args = parse_args(argv) if args.quiet: logger.setLevel(logging.CRITICAL) elif args.verbose: logger.setLevel(logging.DEBUG) cmd = args.func(args) ret = cmd.run_cmd() except KeyboardInterrupt: logger.exception("interrupted by the user") ret = 252 except NotDvcRepoError: logger.exception("") ret = 253 except DvcParserError: ret = 254 except Exception: # pylint: disable=broad-except logger.exception("unexpected error") ret = 255 Analytics().send_cmd(cmd, args, ret) return ret
<SYSTEM_TASK:> Checks if link type config option has a valid value. <END_TASK> <USER_TASK:> Description: def supported_cache_type(types): """Checks if link type config option has a valid value. Args: types (list/string): type(s) of links that dvc should try out. """
if isinstance(types, str):
    types = [typ.strip() for typ in types.split(",")]

for typ in types:
    if typ not in ["reflink", "hardlink", "symlink", "copy"]:
        return False

return True
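A quick sketch of the validation behaviour, assuming supported_cache_type (above) is in scope; both comma-separated strings and lists are accepted.

print(supported_cache_type("reflink,hardlink, copy"))  # True  -- whitespace is stripped
print(supported_cache_type(["symlink", "copy"]))       # True
print(supported_cache_type("ramdisk"))                 # False -- unknown link type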
<SYSTEM_TASK:> Loads config from all the config files. <END_TASK> <USER_TASK:> Description: def load(self, validate=True): """Loads config from all the config files. Args: validate (bool): optional flag to tell dvc if it should validate the config or just load it as is. 'True' by default. Raises: dvc.config.ConfigError: thrown if config has invalid format. """
self._load() try: self.config = self._load_config(self.system_config_file) user = self._load_config(self.global_config_file) config = self._load_config(self.config_file) local = self._load_config(self.config_local_file) # NOTE: schema doesn't support ConfigObj.Section validation, so we # need to convert our config to dict before passing it to for conf in [user, config, local]: self.config = self._merge(self.config, conf) if validate: self.config = Schema(self.SCHEMA).validate(self.config) # NOTE: now converting back to ConfigObj self.config = configobj.ConfigObj( self.config, write_empty_values=True ) self.config.filename = self.config_file self._resolve_paths(self.config, self.config_file) except Exception as ex: raise ConfigError(ex)
<SYSTEM_TASK:> Sets specified option in the config. <END_TASK> <USER_TASK:> Description: def set(config, section, opt, value): """Sets specified option in the config. Args: config (configobj.ConfigObj): config to work on. section (str): section name. opt (str): option name. value: value to set option to. """
if section not in config.keys():
    config[section] = {}

config[section][opt] = value
<SYSTEM_TASK:> Prints option value from the config. <END_TASK> <USER_TASK:> Description: def show(config, section, opt): """Prints option value from the config. Args: config (configobj.ConfigObj): config to work on. section (str): section name. opt (str): option name. """
if section not in config.keys():
    raise ConfigError("section '{}' doesn't exist".format(section))

if opt not in config[section].keys():
    raise ConfigError(
        "option '{}.{}' doesn't exist".format(section, opt)
    )

logger.info(config[section][opt])
<SYSTEM_TASK:> Renames an output file and modifies the stage associated <END_TASK> <USER_TASK:> Description: def move(self, from_path, to_path): """ Renames an output file and modifies the stage associated to reflect the change on the pipeline. If the output has the same name as its stage, it would also rename the corresponding stage file. E.g. Having: (hello, hello.dvc) $ dvc move hello greetings Result: (greeting, greeting.dvc) It only works with outputs generated by `add` or `import`, also known as data sources. """
import dvc.output as Output from dvc.stage import Stage from_out = Output.loads_from(Stage(self), [from_path])[0] to_path = _expand_target_path(from_path, to_path) outs = self.find_outs_by_path(from_out.path) assert len(outs) == 1 out = outs[0] stage = out.stage if not stage.is_data_source: raise MoveNotDataSourceError(stage.relpath) stage_name = os.path.splitext(os.path.basename(stage.path))[0] from_name = os.path.basename(from_out.path) if stage_name == from_name: os.unlink(stage.path) stage.path = os.path.join( os.path.dirname(to_path), os.path.basename(to_path) + Stage.STAGE_FILE_SUFFIX, ) stage.wdir = os.path.abspath( os.path.join(os.curdir, os.path.dirname(to_path)) ) to_out = Output.loads_from( stage, [os.path.basename(to_path)], out.use_cache, out.metric )[0] with self.state: out.move(to_out) stage.dump()
<SYSTEM_TASK:> Generate a version with information about the git repository <END_TASK> <USER_TASK:> Description: def _generate_version(base_version): """Generate a version with information about the git repository"""
pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) if not _is_git_repo(pkg_dir) or not _have_git(): return base_version if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir): return base_version return "{base_version}+{short_sha}{dirty}".format( base_version=base_version, short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6], dirty=".mod" if _is_dirty(pkg_dir) else "", )
<SYSTEM_TASK:> Check whether a git repository has uncommitted changes. <END_TASK> <USER_TASK:> Description: def _is_dirty(dir_path): """Check whether a git repository has uncommitted changes."""
try:
    subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
    return False
except subprocess.CalledProcessError:
    return True
<SYSTEM_TASK:> Exclude specified keys from a nested dict <END_TASK> <USER_TASK:> Description: def dict_filter(d, exclude=[]): """ Exclude specified keys from a nested dict """
if isinstance(d, list):
    ret = []
    for e in d:
        ret.append(dict_filter(e, exclude))
    return ret
elif isinstance(d, dict):
    ret = {}
    for k, v in d.items():
        if isinstance(k, builtin_str):
            k = str(k)

        assert isinstance(k, str)
        if k in exclude:
            continue
        ret[k] = dict_filter(v, exclude)
    return ret

return d
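A minimal sketch of dict_filter, assuming the function above is in scope; it recurses through nested dicts and lists and drops excluded keys at every level. The sample data is made up for illustration.

d = {"name": "stage1", "outs": [{"path": "data.csv", "md5": "abc123"}]}
print(dict_filter(d, exclude=["md5"]))
# {'name': 'stage1', 'outs': [{'path': 'data.csv'}]}  -- 'md5' stripped from the nested dict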
<SYSTEM_TASK:> Copy file with progress bar <END_TASK> <USER_TASK:> Description: def copyfile(src, dest, no_progress_bar=False, name=None): """Copy file with progress bar"""
from dvc.progress import progress copied = 0 name = name if name else os.path.basename(dest) total = os.stat(src).st_size if os.path.isdir(dest): dest = os.path.join(dest, os.path.basename(src)) with open(src, "rb") as fsrc, open(dest, "wb+") as fdest: while True: buf = fsrc.read(LOCAL_CHUNK_SIZE) if not buf: break fdest.write(buf) copied += len(buf) if not no_progress_bar: progress.update_target(name, copied, total) if not no_progress_bar: progress.finish_target(name)
<SYSTEM_TASK:> Proxy for `os.walk` directory tree generator. <END_TASK> <USER_TASK:> Description: def dvc_walk( top, topdown=True, onerror=None, followlinks=False, ignore_file_handler=None, ): """ Proxy for `os.walk` directory tree generator. Utilizes DvcIgnoreFilter functionality. """
ignore_filter = None if topdown: from dvc.ignore import DvcIgnoreFilter ignore_filter = DvcIgnoreFilter( top, ignore_file_handler=ignore_file_handler ) for root, dirs, files in os.walk( top, topdown=topdown, onerror=onerror, followlinks=followlinks ): if ignore_filter: dirs[:], files[:] = ignore_filter(root, dirs, files) yield root, dirs, files
<SYSTEM_TASK:> Returns a message in a specified color. <END_TASK> <USER_TASK:> Description: def colorize(message, color=None): """Returns a message in a specified color."""
if not color: return message colors = { "green": colorama.Fore.GREEN, "yellow": colorama.Fore.YELLOW, "blue": colorama.Fore.BLUE, "red": colorama.Fore.RED, } return "{color}{message}{nc}".format( color=colors.get(color, ""), message=message, nc=colorama.Fore.RESET )
<SYSTEM_TASK:> Put a message inside a box. <END_TASK> <USER_TASK:> Description: def boxify(message, border_color=None): """Put a message inside a box. Args: message (unicode): message to decorate. border_color (unicode): name of the color to outline the box with. """
lines = message.split("\n") max_width = max(_visual_width(line) for line in lines) padding_horizontal = 5 padding_vertical = 1 box_size_horizontal = max_width + (padding_horizontal * 2) chars = {"corner": "+", "horizontal": "-", "vertical": "|", "empty": " "} margin = "{corner}{line}{corner}\n".format( corner=chars["corner"], line=chars["horizontal"] * box_size_horizontal ) padding_lines = [ "{border}{space}{border}\n".format( border=colorize(chars["vertical"], color=border_color), space=chars["empty"] * box_size_horizontal, ) * padding_vertical ] content_lines = [ "{border}{space}{content}{space}{border}\n".format( border=colorize(chars["vertical"], color=border_color), space=chars["empty"] * padding_horizontal, content=_visual_center(line, max_width), ) for line in lines ] box_str = "{margin}{padding}{content}{padding}{margin}".format( margin=colorize(margin, color=border_color), padding="".join(padding_lines), content="".join(content_lines), ) return box_str
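A sketch of boxify output, assuming boxify and colorize (above) are in scope and no border_color is given, so the box is plain ASCII; the exact box width is the visual message width plus ten columns of padding.

print(boxify("DVC is up to date"))
# +---------------------------+
# |                           |
# |     DVC is up to date     |
# |                           |
# +---------------------------+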
<SYSTEM_TASK:> Get the number of columns required to display a string <END_TASK> <USER_TASK:> Description: def _visual_width(line): """Get the number of columns required to display a string"""
return len(re.sub(colorama.ansitowin32.AnsiToWin32.ANSI_CSI_RE, "", line))
<SYSTEM_TASK:> Center align string according to its visual width <END_TASK> <USER_TASK:> Description: def _visual_center(line, width): """Center align string according to its visual width"""
spaces = max(width - _visual_width(line), 0)
left_padding = int(spaces / 2)
right_padding = spaces - left_padding

return (left_padding * " ") + line + (right_padding * " ")
<SYSTEM_TASK:> Returns SCM instance that corresponds to a repo at the specified <END_TASK> <USER_TASK:> Description: def SCM(root_dir, repo=None): # pylint: disable=invalid-name """Returns SCM instance that corresponds to a repo at the specified path. Args: root_dir (str): path to a root directory of the repo. repo (dvc.repo.Repo): dvc repo instance that root_dir belongs to. Returns: dvc.scm.base.Base: SCM instance. """
if Git.is_repo(root_dir) or Git.is_submodule(root_dir):
    return Git(root_dir, repo=repo)

return NoSCM(root_dir, repo=repo)
<SYSTEM_TASK:> Create instances of a parser containing common arguments shared among <END_TASK> <USER_TASK:> Description: def get_parent_parser(): """Create instances of a parser containing common arguments shared among all the commands. When overwriting `-q` or `-v`, you need to instantiate a new object in order to prevent unexpected behavior. """
parent_parser = argparse.ArgumentParser(add_help=False) log_level_group = parent_parser.add_mutually_exclusive_group() log_level_group.add_argument( "-q", "--quiet", action="store_true", default=False, help="Be quiet." ) log_level_group.add_argument( "-v", "--verbose", action="store_true", default=False, help="Be verbose.", ) return parent_parser
<SYSTEM_TASK:> Parses CLI arguments. <END_TASK> <USER_TASK:> Description: def parse_args(argv=None): """Parses CLI arguments. Args: argv: optional list of arguments to parse. sys.argv is used by default. Raises: dvc.exceptions.DvcParserError: raised for argument parsing errors. """
parent_parser = get_parent_parser() # Main parser desc = "Data Version Control" parser = DvcParser( prog="dvc", description=desc, parents=[parent_parser], formatter_class=argparse.RawTextHelpFormatter, ) # NOTE: On some python versions action='version' prints to stderr # instead of stdout https://bugs.python.org/issue18920 parser.add_argument( "-V", "--version", action=VersionAction, nargs=0, help="Show program's version.", ) # Sub commands subparsers = parser.add_subparsers( title="Available Commands", metavar="COMMAND", dest="cmd", help="Use dvc COMMAND --help for command-specific help.", ) fix_subparsers(subparsers) for cmd in COMMANDS: cmd.add_parser(subparsers, parent_parser) args = parser.parse_args(argv) return args
<SYSTEM_TASK:> Recursively apply changes from src to dest. <END_TASK> <USER_TASK:> Description: def apply_diff(src, dest): """Recursively apply changes from src to dest. Preserves dest type and hidden info in dest structure, like ruamel.yaml leaves when parses files. This includes comments, ordering and line foldings. Used in Stage load/dump cycle to preserve comments and custom formatting. """
Seq = (list, tuple) Container = (Mapping, list, tuple) def is_same_type(a, b): return any( isinstance(a, t) and isinstance(b, t) for t in [str, Mapping, Seq, bool] ) if isinstance(src, Mapping) and isinstance(dest, Mapping): for key, value in src.items(): if isinstance(value, Container) and is_same_type( value, dest.get(key) ): apply_diff(value, dest[key]) elif key not in dest or value != dest[key]: dest[key] = value for key in set(dest) - set(src): del dest[key] elif isinstance(src, Seq) and isinstance(dest, Seq): if len(src) != len(dest): dest[:] = src else: for i, value in enumerate(src): if isinstance(value, Container) and is_same_type( value, dest[i] ): apply_diff(value, dest[i]) elif value != dest[i]: dest[i] = value else: raise AssertionError( "Can't apply diff from {} to {}".format( src.__class__.__name__, dest.__class__.__name__ ) )
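A small sketch of apply_diff, assuming the function above is in scope; dest is mutated in place so it ends up matching src while keeping its own container objects (which is how ruamel.yaml comments and formatting survive). The data here is illustrative only.

src = {"cmd": "python train.py", "deps": [{"path": "data.csv"}]}
dest = {"cmd": "python train.py --old", "deps": [{"path": "data.csv"}], "md5": "stale"}
apply_diff(src, dest)
print(dest)
# {'cmd': 'python train.py', 'deps': [{'path': 'data.csv'}]}  -- 'cmd' updated, 'md5' removed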
<SYSTEM_TASK:> Loads analytics report from json file specified by path. <END_TASK> <USER_TASK:> Description: def load(path): """Loads analytics report from json file specified by path. Args: path (str): path to json file with analytics report. """
with open(path, "r") as fobj: analytics = Analytics(info=json.load(fobj)) os.unlink(path) return analytics
<SYSTEM_TASK:> Collect analytics info from a CLI command. <END_TASK> <USER_TASK:> Description: def collect_cmd(self, args, ret): """Collect analytics info from a CLI command."""
from dvc.command.daemon import CmdDaemonAnalytics assert isinstance(ret, int) or ret is None if ret is not None: self.info[self.PARAM_CMD_RETURN_CODE] = ret if args is not None and hasattr(args, "func"): assert args.func != CmdDaemonAnalytics self.info[self.PARAM_CMD_CLASS] = args.func.__name__
<SYSTEM_TASK:> Save analytics report to a temporary file. <END_TASK> <USER_TASK:> Description: def dump(self): """Save analytics report to a temporary file. Returns: str: path to the temporary file that contains the analytics report. """
import tempfile with tempfile.NamedTemporaryFile(delete=False, mode="w") as fobj: json.dump(self.info, fobj) return fobj.name
<SYSTEM_TASK:> Collect and send analytics for CLI command. <END_TASK> <USER_TASK:> Description: def send_cmd(cmd, args, ret): """Collect and send analytics for CLI command. Args: args (list): parsed args for the CLI command. ret (int): return value of the CLI command. """
from dvc.daemon import daemon if not Analytics._is_enabled(cmd): return analytics = Analytics() analytics.collect_cmd(args, ret) daemon(["analytics", analytics.dump()])
<SYSTEM_TASK:> Push data items in a cloud-agnostic way. <END_TASK> <USER_TASK:> Description: def push(self, targets, jobs=None, remote=None, show_checksums=False): """Push data items in a cloud-agnostic way. Args: targets (list): list of targets to push to the cloud. jobs (int): number of jobs that can be running simultaneously. remote (dvc.remote.base.RemoteBase): optional remote to push to. By default remote from core.remote config option is used. show_checksums (bool): show checksums instead of file names in information messages. """
return self.repo.cache.local.push(
    targets,
    jobs=jobs,
    remote=self._get_cloud(remote, "push"),
    show_checksums=show_checksums,
)
<SYSTEM_TASK:> Check status of data items in a cloud-agnostic way. <END_TASK> <USER_TASK:> Description: def status(self, targets, jobs=None, remote=None, show_checksums=False): """Check status of data items in a cloud-agnostic way. Args: targets (list): list of targets to check status for. jobs (int): number of jobs that can be running simultaneously. remote (dvc.remote.base.RemoteBase): optional remote to compare targets to. By default remote from core.remote config option is used. show_checksums (bool): show checksums instead of file names in information messages. """
cloud = self._get_cloud(remote, "status") return self.repo.cache.local.status( targets, jobs=jobs, remote=cloud, show_checksums=show_checksums )
<SYSTEM_TASK:> Generator that iterates over specified revisions. <END_TASK> <USER_TASK:> Description: def brancher(  # noqa: E302
    self, branches=None, all_branches=False, tags=None, all_tags=False
):
    """Generator that iterates over specified revisions.

    Args:
        branches (list): a list of branches to iterate over.
        all_branches (bool): iterate over all available branches.
        tags (list): a list of tags to iterate over.
        all_tags (bool): iterate over all available tags.

    Yields:
        str: the display name for the currently selected tree, it could be:
            - a git revision identifier
            - an empty string if there are no branches to iterate over
            - "Working Tree" if there are uncommitted changes in the SCM repo
    """
if not any([branches, all_branches, tags, all_tags]):
    yield ""
    return

saved_tree = self.tree
revs = []

scm = self.scm

if self.scm.is_dirty():
    from dvc.scm.tree import WorkingTree

    self.tree = WorkingTree(self.root_dir)
    yield "Working Tree"

if all_branches:
    branches = scm.list_branches()

if all_tags:
    tags = scm.list_tags()

if branches is None:
    revs.extend([scm.active_branch()])
else:
    revs.extend(branches)

if tags is not None:
    revs.extend(tags)

# NOTE: it might be a good idea to wrap this loop in a try/finally block
# so that we don't leave the tree on some unexpected branch after
# `brancher()`, but that could cause problems in exception handling code
# which might expect the tree on which the exception was raised to stay
# in place. This behavior is subject to change.
for rev in revs:
    self.tree = scm.get_tree(rev)
    yield rev

self.tree = saved_tree
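Stripped of the dvc specifics, the body follows a save/yield/restore pattern: remember the current tree, swap it once per revision while yielding, and put the original back at the end. A toy, self-contained illustration of that structure (the class and attribute names here are invented for this sketch, they are not dvc's):

```python
# Toy illustration of the save/yield/restore pattern used by brancher();
# Repoish and its attributes are made up for this sketch.
class Repoish:
    def __init__(self):
        self.tree = "working-tree"

    def brancher(self, revs):
        saved_tree = self.tree
        for rev in revs:
            self.tree = "tree@" + rev  # stand-in for scm.get_tree(rev)
            yield rev
        self.tree = saved_tree         # restored only if fully consumed

repo = Repoish()
for rev in repo.brancher(["master", "v1.0"]):
    print(rev, repo.tree)
print(repo.tree)  # back to "working-tree"
```

As the NOTE in the real code points out, restoring inside a finally block would make the cleanup unconditional, at the cost of changing what exception handlers observe.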
<SYSTEM_TASK:> Save checksum for the specified path info. <END_TASK> <USER_TASK:> Description: def save(self, path_info, checksum): """Save checksum for the specified path info. Args: path_info (dict): path_info to save checksum for. checksum (str): checksum to save. """
assert path_info["scheme"] == "local" assert checksum is not None path = path_info["path"] assert os.path.exists(path) actual_mtime, actual_size = get_mtime_and_size(path) actual_inode = get_inode(path) existing_record = self.get_state_record_for_inode(actual_inode) if not existing_record: self._insert_new_state_record( path, actual_inode, actual_mtime, actual_size, checksum ) return self._update_state_for_path_changed( path, actual_inode, actual_mtime, actual_size, checksum )
<SYSTEM_TASK:> Gets the checksum for the specified path info. Checksum will be <END_TASK> <USER_TASK:> Description: def get(self, path_info): """Gets the checksum for the specified path info. Checksum will be retrieved from the state database if available. Args: path_info (dict): path info to get the checksum for. Returns: str or None: checksum for the specified path info or None if it doesn't exist in the state database. """
assert path_info["scheme"] == "local" path = path_info["path"] if not os.path.exists(path): return None actual_mtime, actual_size = get_mtime_and_size(path) actual_inode = get_inode(path) existing_record = self.get_state_record_for_inode(actual_inode) if not existing_record: return None mtime, size, checksum, _ = existing_record if self._file_metadata_changed(actual_mtime, mtime, actual_size, size): return None self._update_state_record_timestamp_for_inode(actual_inode) return checksum
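Both save() and get() rely on get_mtime_and_size() and get_inode(), which are not shown in this excerpt. A minimal sketch of what such helpers could look like for a single file using os.lstat; the real dvc helpers are more involved (they aggregate over directories, among other things):

```python
import os

# Simplified stand-ins for the helpers used by save()/get() above.
# This sketch only covers single files.
def get_inode(path):
    return os.lstat(path).st_ino

def get_mtime_and_size(path):
    st = os.lstat(path)
    return st.st_mtime, st.st_size

print(get_inode(__file__), get_mtime_and_size(__file__))
```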
<SYSTEM_TASK:> Adds the specified path to the list of links created by dvc. This <END_TASK> <USER_TASK:> Description: def save_link(self, path_info):
    """Adds the specified path to the list of links created by dvc. This
    list is later used on `dvc checkout` to clean up old links.

    Args:
        path_info (dict): path info to add to the list of links.
    """
assert path_info["scheme"] == "local" path = path_info["path"] if not os.path.exists(path): return mtime, _ = get_mtime_and_size(path) inode = get_inode(path) relpath = os.path.relpath(path, self.root_dir) cmd = ( "REPLACE INTO {}(path, inode, mtime) " 'VALUES ("{}", {}, "{}")'.format( self.LINK_STATE_TABLE, relpath, self._to_sqlite(inode), mtime ) ) self._execute(cmd)
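The REPLACE INTO statement upserts one row per link, keyed by its relative path. A standalone sketch of the same interaction against an in-memory SQLite database, using parameterized queries; the table and column names mirror the ones referenced above, but this is not the actual dvc schema:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE link_state (path TEXT PRIMARY KEY, inode INTEGER, mtime TEXT)"
)

# REPLACE INTO inserts the row, or overwrites it if the path already exists.
conn.execute(
    "REPLACE INTO link_state (path, inode, mtime) VALUES (?, ?, ?)",
    ("data/file.csv", 12345, "1587000000"),
)
conn.execute(
    "REPLACE INTO link_state (path, inode, mtime) VALUES (?, ?, ?)",
    ("data/file.csv", 12345, "1587000042"),  # second write wins
)

print(conn.execute("SELECT * FROM link_state").fetchall())
# [('data/file.csv', 12345, '1587000042')]
```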
<SYSTEM_TASK:> Removes all saved links except the ones that are used. <END_TASK> <USER_TASK:> Description: def remove_unused_links(self, used): """Removes all saved links except the ones that are used. Args: used (list): list of used links that should not be removed. """
unused = [] self._execute("SELECT * FROM {}".format(self.LINK_STATE_TABLE)) for row in self.cursor: relpath, inode, mtime = row inode = self._from_sqlite(inode) path = os.path.join(self.root_dir, relpath) if path in used: continue if not os.path.exists(path): continue actual_inode = get_inode(path) actual_mtime, _ = get_mtime_and_size(path) if inode == actual_inode and mtime == actual_mtime: logger.debug("Removing '{}' as unused link.".format(path)) remove(path) unused.append(relpath) for relpath in unused: cmd = 'DELETE FROM {} WHERE path = "{}"' self._execute(cmd.format(self.LINK_STATE_TABLE, relpath))
<SYSTEM_TASK:> Acquire lock for dvc repo. <END_TASK> <USER_TASK:> Description: def lock(self): """Acquire lock for dvc repo."""
try: self._do_lock() return except LockError: time.sleep(self.TIMEOUT) self._do_lock()
<SYSTEM_TASK:> Set the scroll region on the canvas <END_TASK> <USER_TASK:> Description: def set_scrollregion(self, event=None): """ Set the scroll region on the canvas"""
self.canvas.configure(scrollregion=self.canvas.bbox('all'))
<SYSTEM_TASK:> Update calendar to show the previous month. <END_TASK> <USER_TASK:> Description: def _prev_month(self):
    """Update calendar to show the previous month."""
self._canvas.place_forget() self._date = self._date - self.timedelta(days=1) self._date = self.datetime(self._date.year, self._date.month, 1) self._build_calendar()
<SYSTEM_TASK:> Update calendar to show the next month. <END_TASK> <USER_TASK:> Description: def _next_month(self): """Update calendar to show the next month."""
self._canvas.place_forget() year, month = self._date.year, self._date.month self._date = self._date + self.timedelta( days=calendar.monthrange(year, month)[1] + 1) self._date = self.datetime(self._date.year, self._date.month, 1) self._build_calendar()
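The month arithmetic relies on calendar.monthrange() to get the number of days in the current month, steps past the end of the month, and then snaps back to the first day. The same trick with plain datetime.date objects:

```python
import calendar
from datetime import date, timedelta

d = date(2023, 1, 1)  # always the first of the month, as in the widget
days_in_month = calendar.monthrange(d.year, d.month)[1]  # 31 for January

d = d + timedelta(days=days_in_month + 1)  # lands somewhere inside February
d = d.replace(day=1)                       # snap back to the first
print(d)  # 2023-02-01
```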
<SYSTEM_TASK:> Return a datetime representing the current selected date. <END_TASK> <USER_TASK:> Description: def selection(self): """Return a datetime representing the current selected date."""
if not self._selection: return None year, month = self._date.year, self._date.month return self.datetime(year, month, int(self._selection[0]))
<SYSTEM_TASK:> Same as verbose_ping, but the results are returned as a tuple <END_TASK> <USER_TASK:> Description: def quiet_ping(hostname, timeout=WAIT_TIMEOUT, count=NUM_PACKETS,
               packet_size=PACKET_SIZE, path_finder=False):
    """
    Same as verbose_ping, but the results are returned as a tuple
    """
myStats = MyStats()  # Reset the stats
mySeqNumber = 0  # Starting value

try:
    destIP = socket.gethostbyname(hostname)
except socket.gaierror as e:
    return 0, 0, 0, 0

myStats.thisIP = destIP

# This will send a packet that we don't care about 0.5 seconds before it
# starts actually pinging. This is needed in big MAN/LAN networks where you
# sometimes lose the first packet. (while the switches find the way... :/ )
if path_finder:
    fakeStats = MyStats()
    do_one(fakeStats, destIP, hostname, timeout,
           mySeqNumber, packet_size, quiet=True)
    time.sleep(0.5)

for i in range(count):
    delay = do_one(myStats, destIP, hostname, timeout,
                   mySeqNumber, packet_size, quiet=True)

    if delay is None:
        delay = 0

    mySeqNumber += 1

    # Pause for the remainder of the MAX_SLEEP period (if applicable)
    if (MAX_SLEEP > delay):
        time.sleep((MAX_SLEEP - delay) / 1000)

if myStats.pktsSent > 0:
    myStats.fracLoss = (myStats.pktsSent - myStats.pktsRcvd) / myStats.pktsSent
if myStats.pktsRcvd > 0:
    myStats.avrgTime = myStats.totTime / myStats.pktsRcvd

# return tuple(max_rtt, min_rtt, avrg_rtt, percent_lost)
return myStats.maxTime, myStats.minTime, myStats.avrgTime, myStats.fracLoss
<SYSTEM_TASK:> Returns a human-readable string representation of bytes <END_TASK> <USER_TASK:> Description: def human_size(bytes, units=[' bytes','KB','MB','GB','TB', 'PB', 'EB']):
    """ Returns a human-readable string representation of bytes"""
return str(bytes) + units[0] if bytes < 1024 else human_size(bytes>>10, units[1:])
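A few example calls of the recursive helper, with its one-line definition repeated so the snippet runs on its own; note that each recursion shifts right by 10 bits (integer division by 1024), so results are truncated rather than rounded:

```python
def human_size(bytes, units=[' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']):
    return str(bytes) + units[0] if bytes < 1024 else human_size(bytes >> 10, units[1:])

print(human_size(512))          # 512 bytes
print(human_size(2048))         # 2KB
print(human_size(5 * 1024**3))  # 5GB
print(human_size(1536))         # 1KB (truncated, not rounded)
```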
<SYSTEM_TASK:> The selection event of the listView returns the key of the clicked item. <END_TASK> <USER_TASK:> Description: def list_view_on_selected(self, widget, selected_item_key):
    """ The selection event of the listView returns the key of the clicked item,
        which can be used to retrieve the item quickly """
self.lbl.set_text('List selection: ' + self.listView.children[selected_item_key].get_text())
<SYSTEM_TASK:> Receive the ping from the socket. Timeout = in ms <END_TASK> <USER_TASK:> Description: def receive_one_ping(mySocket, myID, timeout): """ Receive the ping from the socket. Timeout = in ms """
timeLeft = timeout/1000 while True: # Loop while waiting for packet or timeout startedSelect = default_timer() whatReady = select.select([mySocket], [], [], timeLeft) howLongInSelect = (default_timer() - startedSelect) if whatReady[0] == []: # Timeout return None, 0, 0, 0, 0 timeReceived = default_timer() recPacket, addr = mySocket.recvfrom(ICMP_MAX_RECV) ipHeader = recPacket[:20] iphVersion, iphTypeOfSvc, iphLength, \ iphID, iphFlags, iphTTL, iphProtocol, \ iphChecksum, iphSrcIP, iphDestIP = struct.unpack( "!BBHHHBBHII", ipHeader ) icmpHeader = recPacket[20:28] icmpType, icmpCode, icmpChecksum, \ icmpPacketID, icmpSeqNumber = struct.unpack( "!BBHHH", icmpHeader ) if icmpPacketID == myID: # Our packet dataSize = len(recPacket) - 28 #print (len(recPacket.encode())) return timeReceived, (dataSize+8), iphSrcIP, icmpSeqNumber, iphTTL timeLeft = timeLeft - howLongInSelect if timeLeft <= 0: return None, 0, 0, 0, 0
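The header parsing above is two struct.unpack() calls: one for the 20-byte IPv4 header and one for the 8-byte ICMP header that follows it. A self-contained illustration of those unpack formats on a fabricated packet:

```python
import struct

# Fabricated packet for illustration: a 20-byte IPv4 header followed by an
# 8-byte ICMP echo-reply header and a small payload.
ip_header = struct.pack("!BBHHHBBHII",
                        0x45, 0, 36, 1, 0, 64, 1, 0,
                        0x7F000001, 0x7F000001)           # TTL=64, 127.0.0.1
icmp_header = struct.pack("!BBHHH", 0, 0, 0, 1234, 1)     # ID=1234, seq=1
rec_packet = ip_header + icmp_header + b"payload!"

iph = struct.unpack("!BBHHHBBHII", rec_packet[:20])
icmp_type, icmp_code, icmp_checksum, icmp_packet_id, icmp_seq = struct.unpack(
    "!BBHHH", rec_packet[20:28])

print(iph[5], icmp_packet_id, icmp_seq)   # 64 1234 1
print(len(rec_packet) - 28)               # payload size, computed as above
```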
<SYSTEM_TASK:> Exports a ModuleSpec with weights taken from a checkpoint. <END_TASK> <USER_TASK:> Description: def export(self, path, _sentinel=None,  # pylint: disable=invalid-name
           checkpoint_path=None, name_transform_fn=None):
    """Exports a ModuleSpec with weights taken from a checkpoint.

    This is a helper to export modules directly from a ModuleSpec
    without having to create a session and set the variables to the
    intended values.

    Example usage:

    ```python
    spec = hub.create_module_spec(module_fn)
    spec.export("/path/to/export_module",
                checkpoint_path="/path/to/training_model")
    ```

    In some cases, the variable name in the checkpoint does not match
    the variable name in the module. It is possible to work around that by
    providing a `name_transform_fn` that performs the variable mapping. For
    example with: `name_transform_fn = lambda x: "extra_scope/" + x`.

    Args:
      path: path to export the module to.
      _sentinel: used to prevent positional arguments besides `path`.
      checkpoint_path: path to load the weights for the module from.
        Mandatory parameter and must be passed by name.
      name_transform_fn: optional function to provide mapping between
        variable name in the module and the variable name in the checkpoint.

    Raises:
      ValueError: if missing mandatory `checkpoint_path` parameter.
    """
from tensorflow_hub.module import export_module_spec # pylint: disable=g-import-not-at-top if not checkpoint_path: raise ValueError("Missing mandatory `checkpoint_path` parameter") name_transform_fn = name_transform_fn or (lambda x: x) export_module_spec(self, path, checkpoint_path, name_transform_fn)
<SYSTEM_TASK:> Returns the message attached to the module under the given key, or None. <END_TASK> <USER_TASK:> Description: def get_attached_message(self, key, message_type, tags=None, required=False):
    """Returns the message attached to the module under the given key, or None.

    Module publishers can attach protocol messages to modules at creation time
    to provide module consumers with additional information, e.g., on module
    usage or provenance (see hub.attach_message()). A typical use would be to
    store a small set of named values with modules of a certain type so that
    a support library for consumers of such modules can be parametric in those
    values.

    This method can also be called on a Module instantiated from a ModuleSpec,
    in which case `tags` are set to those used in module instantiation.

    Args:
      key: A string with the key of an attached message.
      message_type: A concrete protocol message class (*not* object) used
        to parse the attached message from its serialized representation.
        The message type for a particular key must be advertised with the key.
      tags: Optional set of strings, specifying the graph variant from which
        to read the attached message.
      required: An optional boolean. Setting it true changes the effect of
        an unknown `key` from returning None to raising a KeyError with text
        about attached messages.

    Returns:
      An instance of `message_type` with the message contents attached to the
      module, or `None` if `key` is unknown and `required` is False.

    Raises:
      KeyError: if `key` is unknown and `required` is True.
    """
attached_bytes = self._get_attached_bytes(key, tags) if attached_bytes is None: if required: raise KeyError("No attached message for key '%s' in graph version %s " "of Hub Module" % (key, sorted(tags or []))) else: return None message = message_type() message.ParseFromString(attached_bytes) return message
<SYSTEM_TASK:> Builds a list of training images from the file system. <END_TASK> <USER_TASK:> Description: def create_image_lists(image_dir, testing_percentage, validation_percentage): """Builds a list of training images from the file system. Analyzes the sub folders in the image directory, splits them into stable training, testing, and validation sets, and returns a data structure describing the lists of images for each label and their paths. Args: image_dir: String path to a folder containing subfolders of images. testing_percentage: Integer percentage of the images to reserve for tests. validation_percentage: Integer percentage of images reserved for validation. Returns: An OrderedDict containing an entry for each label subfolder, with images split into training, testing, and validation sets within each label. The order of items defines the class indices. """
if not tf.gfile.Exists(image_dir): tf.logging.error("Image directory '" + image_dir + "' not found.") return None result = collections.OrderedDict() sub_dirs = sorted(x[0] for x in tf.gfile.Walk(image_dir)) # The root directory comes first, so skip it. is_root_dir = True for sub_dir in sub_dirs: if is_root_dir: is_root_dir = False continue extensions = sorted(set(os.path.normcase(ext) # Smash case on Windows. for ext in ['JPEG', 'JPG', 'jpeg', 'jpg', 'png'])) file_list = [] dir_name = os.path.basename( # tf.gfile.Walk() returns sub-directory with trailing '/' when it is in # Google Cloud Storage, which confuses os.path.basename(). sub_dir[:-1] if sub_dir.endswith('/') else sub_dir) if dir_name == image_dir: continue tf.logging.info("Looking for images in '" + dir_name + "'") for extension in extensions: file_glob = os.path.join(image_dir, dir_name, '*.' + extension) file_list.extend(tf.gfile.Glob(file_glob)) if not file_list: tf.logging.warning('No files found') continue if len(file_list) < 20: tf.logging.warning( 'WARNING: Folder has less than 20 images, which may cause issues.') elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS: tf.logging.warning( 'WARNING: Folder {} has more than {} images. Some images will ' 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS)) label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower()) training_images = [] testing_images = [] validation_images = [] for file_name in file_list: base_name = os.path.basename(file_name) # We want to ignore anything after '_nohash_' in the file name when # deciding which set to put an image in, the data set creator has a way of # grouping photos that are close variations of each other. For example # this is used in the plant disease data set to group multiple pictures of # the same leaf. hash_name = re.sub(r'_nohash_.*$', '', file_name) # This looks a bit magical, but we need to decide whether this file should # go into the training, testing, or validation sets, and we want to keep # existing files in the same set even if more files are subsequently # added. # To do that, we need a stable way of deciding based on just the file name # itself, so we do a hash of that and then use that to generate a # probability value that we use to assign it. hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest() percentage_hash = ((int(hash_name_hashed, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) * (100.0 / MAX_NUM_IMAGES_PER_CLASS)) if percentage_hash < validation_percentage: validation_images.append(base_name) elif percentage_hash < (testing_percentage + validation_percentage): testing_images.append(base_name) else: training_images.append(base_name) result[label_name] = { 'dir': dir_name, 'training': training_images, 'testing': testing_images, 'validation': validation_images, } return result
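The assignment of each file to the training, testing, or validation set hinges on hashing the file name (minus any '_nohash_' suffix), so the split stays stable as more images are added. A standalone sketch of just that decision, assuming MAX_NUM_IMAGES_PER_CLASS is 2 ** 27 - 1 as in the surrounding script:

```python
import hashlib
import re

MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1  # assumed value of the module constant

def assign_split(file_name, testing_percentage, validation_percentage):
    # Ignore everything after '_nohash_' so variants of one photo land in the
    # same set, then map the SHA-1 of the name to a stable percentage.
    hash_name = re.sub(r'_nohash_.*$', '', file_name)
    hashed = hashlib.sha1(hash_name.encode('utf-8')).hexdigest()
    percentage_hash = ((int(hashed, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                       (100.0 / MAX_NUM_IMAGES_PER_CLASS))
    if percentage_hash < validation_percentage:
        return 'validation'
    if percentage_hash < testing_percentage + validation_percentage:
        return 'testing'
    return 'training'

print(assign_split('leaf_007_nohash_2.jpg', 10, 10))
```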
<SYSTEM_TASK:> Returns a path to an image for a label at the given index. <END_TASK> <USER_TASK:> Description: def get_image_path(image_lists, label_name, index, image_dir, category): """Returns a path to an image for a label at the given index. Args: image_lists: OrderedDict of training images for each label. label_name: Label string we want to get an image for. index: Int offset of the image we want. This will be moduloed by the available number of images for the label, so it can be arbitrarily large. image_dir: Root folder string of the subfolders containing the training images. category: Name string of set to pull images from - training, testing, or validation. Returns: File system path string to an image that meets the requested parameters. """
if label_name not in image_lists: tf.logging.fatal('Label does not exist %s.', label_name) label_lists = image_lists[label_name] if category not in label_lists: tf.logging.fatal('Category does not exist %s.', category) category_list = label_lists[category] if not category_list: tf.logging.fatal('Label %s has no images in the category %s.', label_name, category) mod_index = index % len(category_list) base_name = category_list[mod_index] sub_dir = label_lists['dir'] full_path = os.path.join(image_dir, sub_dir, base_name) return full_path
<SYSTEM_TASK:> Returns a path to a bottleneck file for a label at the given index. <END_TASK> <USER_TASK:> Description: def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category, module_name): """Returns a path to a bottleneck file for a label at the given index. Args: image_lists: OrderedDict of training images for each label. label_name: Label string we want to get an image for. index: Integer offset of the image we want. This will be moduloed by the available number of images for the label, so it can be arbitrarily large. bottleneck_dir: Folder string holding cached files of bottleneck values. category: Name string of set to pull images from - training, testing, or validation. module_name: The name of the image module being used. Returns: File system path string to an image that meets the requested parameters. """
module_name = (module_name.replace('://', '~') # URL scheme. .replace('/', '~') # URL and Unix paths. .replace(':', '~').replace('\\', '~')) # Windows paths. return get_image_path(image_lists, label_name, index, bottleneck_dir, category) + '_' + module_name + '.txt'
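The chained replace() calls turn the module handle into something safe to embed in a file name by collapsing URL schemes, slashes, colons, and backslashes into '~'. Applied to a typical TF-Hub handle (the URL is chosen purely for illustration):

```python
module_name = 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1'
sanitized = (module_name.replace('://', '~')   # URL scheme.
             .replace('/', '~')                # URL and Unix paths.
             .replace(':', '~')
             .replace('\\', '~'))              # Windows paths.
print(sanitized)
# https~tfhub.dev~google~imagenet~inception_v3~feature_vector~1
```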
<SYSTEM_TASK:> Creates a graph and loads Hub Module into it. <END_TASK> <USER_TASK:> Description: def create_module_graph(module_spec): """Creates a graph and loads Hub Module into it. Args: module_spec: the hub.ModuleSpec for the image module being used. Returns: graph: the tf.Graph that was created. bottleneck_tensor: the bottleneck values output by the module. resized_input_tensor: the input images, resized as expected by the module. wants_quantization: a boolean, whether the module has been instrumented with fake quantization ops. """
height, width = hub.get_expected_image_size(module_spec) with tf.Graph().as_default() as graph: resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3]) m = hub.Module(module_spec) bottleneck_tensor = m(resized_input_tensor) wants_quantization = any(node.op in FAKE_QUANT_OPS for node in graph.as_graph_def().node) return graph, bottleneck_tensor, resized_input_tensor, wants_quantization
<SYSTEM_TASK:> Runs inference on an image to extract the 'bottleneck' summary layer. <END_TASK> <USER_TASK:> Description: def run_bottleneck_on_image(sess, image_data, image_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor): """Runs inference on an image to extract the 'bottleneck' summary layer. Args: sess: Current active TensorFlow Session. image_data: String of raw JPEG data. image_data_tensor: Input data layer in the graph. decoded_image_tensor: Output of initial image resizing and preprocessing. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: Layer before the final softmax. Returns: Numpy array of bottleneck values. """
# First decode the JPEG image, resize it, and rescale the pixel values. resized_input_values = sess.run(decoded_image_tensor, {image_data_tensor: image_data}) # Then run it through the recognition network. bottleneck_values = sess.run(bottleneck_tensor, {resized_input_tensor: resized_input_values}) bottleneck_values = np.squeeze(bottleneck_values) return bottleneck_values
<SYSTEM_TASK:> Create a single bottleneck file. <END_TASK> <USER_TASK:> Description: def create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor): """Create a single bottleneck file."""
tf.logging.debug('Creating bottleneck at ' + bottleneck_path) image_path = get_image_path(image_lists, label_name, index, image_dir, category) if not tf.gfile.Exists(image_path): tf.logging.fatal('File does not exist %s', image_path) image_data = tf.gfile.GFile(image_path, 'rb').read() try: bottleneck_values = run_bottleneck_on_image( sess, image_data, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor) except Exception as e: raise RuntimeError('Error during processing file %s (%s)' % (image_path, str(e))) bottleneck_string = ','.join(str(x) for x in bottleneck_values) with open(bottleneck_path, 'w') as bottleneck_file: bottleneck_file.write(bottleneck_string)
<SYSTEM_TASK:> Retrieves or calculates bottleneck values for an image. <END_TASK> <USER_TASK:> Description: def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name): """Retrieves or calculates bottleneck values for an image. If a cached version of the bottleneck data exists on-disk, return that, otherwise calculate the data and save it to disk for future use. Args: sess: The current active TensorFlow Session. image_lists: OrderedDict of training images for each label. label_name: Label string we want to get an image for. index: Integer offset of the image we want. This will be modulo-ed by the available number of images for the label, so it can be arbitrarily large. image_dir: Root folder string of the subfolders containing the training images. category: Name string of which set to pull images from - training, testing, or validation. bottleneck_dir: Folder string holding cached files of bottleneck values. jpeg_data_tensor: The tensor to feed loaded jpeg data into. decoded_image_tensor: The output of decoding and resizing the image. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The output tensor for the bottleneck values. module_name: The name of the image module being used. Returns: Numpy array of values produced by the bottleneck layer for the image. """
label_lists = image_lists[label_name] sub_dir = label_lists['dir'] sub_dir_path = os.path.join(bottleneck_dir, sub_dir) ensure_dir_exists(sub_dir_path) bottleneck_path = get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category, module_name) if not os.path.exists(bottleneck_path): create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor) with open(bottleneck_path, 'r') as bottleneck_file: bottleneck_string = bottleneck_file.read() did_hit_error = False try: bottleneck_values = [float(x) for x in bottleneck_string.split(',')] except ValueError: tf.logging.warning('Invalid float found, recreating bottleneck') did_hit_error = True if did_hit_error: create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor) with open(bottleneck_path, 'r') as bottleneck_file: bottleneck_string = bottleneck_file.read() # Allow exceptions to propagate here, since they shouldn't happen after a # fresh creation bottleneck_values = [float(x) for x in bottleneck_string.split(',')] return bottleneck_values
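Stripped of the TensorFlow specifics, the function is a compute-or-read cache over comma-separated floats, with a single retry when the cached file fails to parse. A generic, self-contained version of that pattern:

```python
import os
import tempfile

def _write_values(path, values):
    with open(path, 'w') as fobj:
        fobj.write(','.join(str(x) for x in values))

def get_or_create_cached_values(cache_path, compute_fn):
    # Compute once and cache as a comma-separated string of floats.
    if not os.path.exists(cache_path):
        _write_values(cache_path, compute_fn())
    with open(cache_path, 'r') as fobj:
        text = fobj.read()
    try:
        return [float(x) for x in text.split(',')]
    except ValueError:
        # Cache file is corrupt: recreate it once, then let errors propagate.
        values = list(compute_fn())
        _write_values(cache_path, values)
        return values

cache_file = os.path.join(tempfile.gettempdir(), 'demo_bottleneck.txt')
print(get_or_create_cached_values(cache_file, lambda: [0.1, 0.2, 0.3]))
```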
<SYSTEM_TASK:> Ensures all the training, testing, and validation bottlenecks are cached. <END_TASK> <USER_TASK:> Description: def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name): """Ensures all the training, testing, and validation bottlenecks are cached. Because we're likely to read the same image multiple times (if there are no distortions applied during training) it can speed things up a lot if we calculate the bottleneck layer values once for each image during preprocessing, and then just read those cached values repeatedly during training. Here we go through all the images we've found, calculate those values, and save them off. Args: sess: The current active TensorFlow Session. image_lists: OrderedDict of training images for each label. image_dir: Root folder string of the subfolders containing the training images. bottleneck_dir: Folder string holding cached files of bottleneck values. jpeg_data_tensor: Input tensor for jpeg data from file. decoded_image_tensor: The output of decoding and resizing the image. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The penultimate output layer of the graph. module_name: The name of the image module being used. Returns: Nothing. """
how_many_bottlenecks = 0 ensure_dir_exists(bottleneck_dir) for label_name, label_lists in image_lists.items(): for category in ['training', 'testing', 'validation']: category_list = label_lists[category] for index, unused_base_name in enumerate(category_list): get_or_create_bottleneck( sess, image_lists, label_name, index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name) how_many_bottlenecks += 1 if how_many_bottlenecks % 100 == 0: tf.logging.info( str(how_many_bottlenecks) + ' bottleneck files created.')
<SYSTEM_TASK:> Retrieves bottleneck values for cached images. <END_TASK> <USER_TASK:> Description: def get_random_cached_bottlenecks(sess, image_lists, how_many, category, bottleneck_dir, image_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name): """Retrieves bottleneck values for cached images. If no distortions are being applied, this function can retrieve the cached bottleneck values directly from disk for images. It picks a random set of images from the specified category. Args: sess: Current TensorFlow Session. image_lists: OrderedDict of training images for each label. how_many: If positive, a random sample of this size will be chosen. If negative, all bottlenecks will be retrieved. category: Name string of which set to pull from - training, testing, or validation. bottleneck_dir: Folder string holding cached files of bottleneck values. image_dir: Root folder string of the subfolders containing the training images. jpeg_data_tensor: The layer to feed jpeg image data into. decoded_image_tensor: The output of decoding and resizing the image. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The bottleneck output layer of the CNN graph. module_name: The name of the image module being used. Returns: List of bottleneck arrays, their corresponding ground truths, and the relevant filenames. """
class_count = len(image_lists.keys()) bottlenecks = [] ground_truths = [] filenames = [] if how_many >= 0: # Retrieve a random sample of bottlenecks. for unused_i in range(how_many): label_index = random.randrange(class_count) label_name = list(image_lists.keys())[label_index] image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1) image_name = get_image_path(image_lists, label_name, image_index, image_dir, category) bottleneck = get_or_create_bottleneck( sess, image_lists, label_name, image_index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name) bottlenecks.append(bottleneck) ground_truths.append(label_index) filenames.append(image_name) else: # Retrieve all bottlenecks. for label_index, label_name in enumerate(image_lists.keys()): for image_index, image_name in enumerate( image_lists[label_name][category]): image_name = get_image_path(image_lists, label_name, image_index, image_dir, category) bottleneck = get_or_create_bottleneck( sess, image_lists, label_name, image_index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name) bottlenecks.append(bottleneck) ground_truths.append(label_index) filenames.append(image_name) return bottlenecks, ground_truths, filenames
<SYSTEM_TASK:> Retrieves bottleneck values for training images, after distortions. <END_TASK> <USER_TASK:> Description: def get_random_distorted_bottlenecks( sess, image_lists, how_many, category, image_dir, input_jpeg_tensor, distorted_image, resized_input_tensor, bottleneck_tensor): """Retrieves bottleneck values for training images, after distortions. If we're training with distortions like crops, scales, or flips, we have to recalculate the full model for every image, and so we can't use cached bottleneck values. Instead we find random images for the requested category, run them through the distortion graph, and then the full graph to get the bottleneck results for each. Args: sess: Current TensorFlow Session. image_lists: OrderedDict of training images for each label. how_many: The integer number of bottleneck values to return. category: Name string of which set of images to fetch - training, testing, or validation. image_dir: Root folder string of the subfolders containing the training images. input_jpeg_tensor: The input layer we feed the image data to. distorted_image: The output node of the distortion graph. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The bottleneck output layer of the CNN graph. Returns: List of bottleneck arrays and their corresponding ground truths. """
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
    label_index = random.randrange(class_count)
    label_name = list(image_lists.keys())[label_index]
    image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
    image_path = get_image_path(image_lists, label_name, image_index, image_dir,
                                category)
    if not tf.gfile.Exists(image_path):
        tf.logging.fatal('File does not exist %s', image_path)
    jpeg_data = tf.gfile.GFile(image_path, 'rb').read()
    # Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves 2 memory copies and might
    # be optimized in other implementations.
    distorted_image_data = sess.run(distorted_image,
                                    {input_jpeg_tensor: jpeg_data})
    bottleneck_values = sess.run(bottleneck_tensor,
                                 {resized_input_tensor: distorted_image_data})
    bottleneck_values = np.squeeze(bottleneck_values)
    bottlenecks.append(bottleneck_values)
    ground_truths.append(label_index)
return bottlenecks, ground_truths