<SYSTEM_TASK:> Creates the operations to apply the specified distortions. <END_TASK> <USER_TASK:> Description: def add_input_distortions(flip_left_right, random_crop, random_scale, random_brightness, module_spec): """Creates the operations to apply the specified distortions.

  During training it can help to improve the results if we run the images
  through simple distortions like crops, scales, and flips. These reflect the
  kind of variations we expect in the real world, and so can help train the
  model to cope with natural data more effectively. Here we take the supplied
  parameters and construct a network of operations to apply them to an image.

  Cropping
  ~~~~~~~~

  Cropping is done by placing a bounding box at a random position in the full
  image. The cropping parameter controls the size of that box relative to the
  input image. If it's zero, then the box is the same size as the input and no
  cropping is performed. If the value is 50%, then the crop box will be half the
  width and height of the input. In a diagram it looks like this:

  <       width      >
  +---------------------+
  |                     |
  |   width - crop%     |
  |    <      >         |
  |    +------+         |
  |    |      |         |
  |    |      |         |
  |    |      |         |
  |    +------+         |
  |                     |
  |                     |
  +---------------------+

  Scaling
  ~~~~~~~

  Scaling is a lot like cropping, except that the bounding box is always
  centered and its size varies randomly within the given range. For example if
  the scale percentage is zero, then the bounding box is the same size as the
  input and no scaling is applied. If it's 50%, then the bounding box will be in
  a random range between half the width and height and full size.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
      crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
    module_spec: The hub.ModuleSpec for the image module being used.

  Returns:
    The jpeg input layer and the distorted result tensor.
  """
input_height, input_width = hub.get_expected_image_size(module_spec) input_depth = hub.get_num_image_channels(module_spec) jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput') decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth) # Convert from full range of uint8 to range [0,1] of float32. decoded_image_as_float = tf.image.convert_image_dtype(decoded_image, tf.float32) decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0) margin_scale = 1.0 + (random_crop / 100.0) resize_scale = 1.0 + (random_scale / 100.0) margin_scale_value = tf.constant(margin_scale) resize_scale_value = tf.random_uniform(shape=[], minval=1.0, maxval=resize_scale) scale_value = tf.multiply(margin_scale_value, resize_scale_value) precrop_width = tf.multiply(scale_value, input_width) precrop_height = tf.multiply(scale_value, input_height) precrop_shape = tf.stack([precrop_height, precrop_width]) precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32) precropped_image = tf.image.resize_bilinear(decoded_image_4d, precrop_shape_as_int) precropped_image_3d = tf.squeeze(precropped_image, axis=[0]) cropped_image = tf.random_crop(precropped_image_3d, [input_height, input_width, input_depth]) if flip_left_right: flipped_image = tf.image.random_flip_left_right(cropped_image) else: flipped_image = cropped_image brightness_min = 1.0 - (random_brightness / 100.0) brightness_max = 1.0 + (random_brightness / 100.0) brightness_value = tf.random_uniform(shape=[], minval=brightness_min, maxval=brightness_max) brightened_image = tf.multiply(flipped_image, brightness_value) distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult') return jpeg_data, distort_result
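A minimal sketch of wiring these distortion ops into a session. The module handle, distortion percentages, and image path are illustrative values, not defaults from the source:

```python
import tensorflow as tf
import tensorflow_hub as hub

module_spec = hub.load_module_spec(
    "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/2")
jpeg_data, distorted_image = add_input_distortions(
    flip_left_right=True, random_crop=10, random_scale=10,
    random_brightness=10, module_spec=module_spec)
with tf.Session() as sess:
  # "example.jpg" is a hypothetical image file.
  image_bytes = tf.gfile.GFile("example.jpg", "rb").read()
  distorted = sess.run(distorted_image, feed_dict={jpeg_data: image_bytes})
```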
<SYSTEM_TASK:> Adds a new softmax and fully-connected layer for training and eval. <END_TASK> <USER_TASK:> Description: def add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, quantize_layer, is_training): """Adds a new softmax and fully-connected layer for training and eval.

  We need to retrain the top layer to identify our new classes, so this function
  adds the right operations to the graph, along with some variables to hold the
  weights, and then sets up all the gradients for the backward pass.

  The setup for the softmax and fully-connected layers is based on:
  https://www.tensorflow.org/tutorials/mnist/beginners/index.html

  Args:
    class_count: Integer of how many categories of things we're trying to
      recognize.
    final_tensor_name: Name string for the new final node that produces results.
    bottleneck_tensor: The output of the main CNN graph.
    quantize_layer: Boolean, specifying whether the newly added layer should be
      instrumented for quantization with TF-Lite.
    is_training: Boolean, specifying whether the newly added layer is for
      training or eval.

  Returns:
    The tensors for the training and cross entropy results, and tensors for the
    bottleneck input and ground truth input.
  """
batch_size, bottleneck_tensor_size = bottleneck_tensor.get_shape().as_list() assert batch_size is None, 'We want to work with arbitrary batch size.' with tf.name_scope('input'): bottleneck_input = tf.placeholder_with_default( bottleneck_tensor, shape=[batch_size, bottleneck_tensor_size], name='BottleneckInputPlaceholder') ground_truth_input = tf.placeholder( tf.int64, [batch_size], name='GroundTruthInput') # Organizing the following ops so they are easier to see in TensorBoard. layer_name = 'final_retrain_ops' with tf.name_scope(layer_name): with tf.name_scope('weights'): initial_value = tf.truncated_normal( [bottleneck_tensor_size, class_count], stddev=0.001) layer_weights = tf.Variable(initial_value, name='final_weights') variable_summaries(layer_weights) with tf.name_scope('biases'): layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases') variable_summaries(layer_biases) with tf.name_scope('Wx_plus_b'): logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases tf.summary.histogram('pre_activations', logits) final_tensor = tf.nn.softmax(logits, name=final_tensor_name) # The tf.contrib.quantize functions rewrite the graph in place for # quantization. The imported model graph has already been rewritten, so upon # calling these rewrites, only the newly added final layer will be # transformed. if quantize_layer: if is_training: tf.contrib.quantize.create_training_graph() else: tf.contrib.quantize.create_eval_graph() tf.summary.histogram('activations', final_tensor) # If this is an eval graph, we don't need to add loss ops or an optimizer. if not is_training: return None, None, bottleneck_input, ground_truth_input, final_tensor with tf.name_scope('cross_entropy'): cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy( labels=ground_truth_input, logits=logits) tf.summary.scalar('cross_entropy', cross_entropy_mean) with tf.name_scope('train'): optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate) train_step = optimizer.minimize(cross_entropy_mean) return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input, final_tensor)
<SYSTEM_TASK:> Inserts the operations we need to evaluate the accuracy of our results. <END_TASK> <USER_TASK:> Description: def add_evaluation_step(result_tensor, ground_truth_tensor): """Inserts the operations we need to evaluate the accuracy of our results. Args: result_tensor: The new final node that produces results. ground_truth_tensor: The node we feed ground truth data into. Returns: Tuple of (evaluation step, prediction). """
with tf.name_scope('accuracy'): with tf.name_scope('correct_prediction'): prediction = tf.argmax(result_tensor, 1) correct_prediction = tf.equal(prediction, ground_truth_tensor) with tf.name_scope('accuracy'): evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) tf.summary.scalar('accuracy', evaluation_step) return evaluation_step, prediction
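A short sketch of attaching the evaluation ops to placeholder tensors; the class count of 5 is arbitrary:

```python
import tensorflow as tf

result_tensor = tf.placeholder(tf.float32, [None, 5])   # per-class scores
ground_truth_tensor = tf.placeholder(tf.int64, [None])  # integer labels
evaluation_step, prediction = add_evaluation_step(result_tensor,
                                                  ground_truth_tensor)
```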
<SYSTEM_TASK:> Runs a final evaluation on an eval graph using the test data set. <END_TASK> <USER_TASK:> Description: def run_final_eval(train_session, module_spec, class_count, image_lists, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor): """Runs a final evaluation on an eval graph using the test data set. Args: train_session: Session for the train graph with the tensors below. module_spec: The hub.ModuleSpec for the image module being used. class_count: Number of classes image_lists: OrderedDict of training images for each label. jpeg_data_tensor: The layer to feed jpeg image data into. decoded_image_tensor: The output of decoding and resizing the image. resized_image_tensor: The input node of the recognition graph. bottleneck_tensor: The bottleneck output layer of the CNN graph. """
test_bottlenecks, test_ground_truth, test_filenames = ( get_random_cached_bottlenecks(train_session, image_lists, FLAGS.test_batch_size, 'testing', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor, FLAGS.tfhub_module)) (eval_session, _, bottleneck_input, ground_truth_input, evaluation_step, prediction) = build_eval_session(module_spec, class_count) test_accuracy, predictions = eval_session.run( [evaluation_step, prediction], feed_dict={ bottleneck_input: test_bottlenecks, ground_truth_input: test_ground_truth }) tf.logging.info('Final test accuracy = %.1f%% (N=%d)' % (test_accuracy * 100, len(test_bottlenecks))) if FLAGS.print_misclassified_test_images: tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===') for i, test_filename in enumerate(test_filenames): if predictions[i] != test_ground_truth[i]: tf.logging.info('%70s %s' % (test_filename, list(image_lists.keys())[predictions[i]]))
<SYSTEM_TASK:> Builds a restored eval session without train operations for exporting. <END_TASK> <USER_TASK:> Description: def build_eval_session(module_spec, class_count): """Builds a restored eval session without train operations for exporting.

  Args:
    module_spec: The hub.ModuleSpec for the image module being used.
    class_count: Number of classes

  Returns:
    Eval session containing the restored eval graph.
    The bottleneck input, ground truth, eval step, and prediction tensors.
  """
# If quantized, we need to create the correct eval graph for exporting. eval_graph, bottleneck_tensor, resized_input_tensor, wants_quantization = ( create_module_graph(module_spec)) eval_sess = tf.Session(graph=eval_graph) with eval_graph.as_default(): # Add the new layer for exporting. (_, _, bottleneck_input, ground_truth_input, final_tensor) = add_final_retrain_ops( class_count, FLAGS.final_tensor_name, bottleneck_tensor, wants_quantization, is_training=False) # Now we need to restore the values from the training graph to the eval # graph. tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME) evaluation_step, prediction = add_evaluation_step(final_tensor, ground_truth_input) return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input, evaluation_step, prediction)
<SYSTEM_TASK:> Saves a graph to file, creating a valid quantized one if necessary. <END_TASK> <USER_TASK:> Description: def save_graph_to_file(graph_file_name, module_spec, class_count): """Saves a graph to file, creating a valid quantized one if necessary."""
sess, _, _, _, _, _ = build_eval_session(module_spec, class_count) graph = sess.graph output_graph_def = tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), [FLAGS.final_tensor_name]) with tf.gfile.GFile(graph_file_name, 'wb') as f: f.write(output_graph_def.SerializeToString())
<SYSTEM_TASK:> Adds operations that perform JPEG decoding and resizing to the graph. <END_TASK> <USER_TASK:> Description: def add_jpeg_decoding(module_spec): """Adds operations that perform JPEG decoding and resizing to the graph.

  Args:
    module_spec: The hub.ModuleSpec for the image module being used.

  Returns:
    Tensors for the node to feed JPEG data into, and the output of the
    preprocessing steps.
  """
input_height, input_width = hub.get_expected_image_size(module_spec) input_depth = hub.get_num_image_channels(module_spec) jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput') decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth) # Convert from full range of uint8 to range [0,1] of float32. decoded_image_as_float = tf.image.convert_image_dtype(decoded_image, tf.float32) decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0) resize_shape = tf.stack([input_height, input_width]) resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32) resized_image = tf.image.resize_bilinear(decoded_image_4d, resize_shape_as_int) return jpeg_data, resized_image
<SYSTEM_TASK:> Exports model for serving. <END_TASK> <USER_TASK:> Description: def export_model(module_spec, class_count, saved_model_dir): """Exports model for serving. Args: module_spec: The hub.ModuleSpec for the image module being used. class_count: The number of classes. saved_model_dir: Directory in which to save exported model and variables. """
# The SavedModel should hold the eval graph. sess, in_image, _, _, _, _ = build_eval_session(module_spec, class_count) with sess.graph.as_default() as graph: tf.saved_model.simple_save( sess, saved_model_dir, inputs={'image': in_image}, outputs={'prediction': graph.get_tensor_by_name('final_result:0')}, legacy_init_op=tf.group(tf.tables_initializer(), name='legacy_init_op') )
<SYSTEM_TASK:> Returns expected num_channels dimensions of an image input. <END_TASK> <USER_TASK:> Description: def get_num_image_channels(module_or_spec, signature=None, input_name=None): """Returns expected num_channels dimensions of an image input. This is for advanced users only who expect to handle modules with image inputs that might not have the 3 usual RGB channels. Args: module_or_spec: a Module or ModuleSpec that accepts image inputs. signature: a string with the key of the signature in question. If None, the default signature is used. input_name: a string with the input name for images. If None, the conventional input name `images` for the default signature is used. Returns: An integer with the number of input channels to the module. Raises: ValueError: If the channel information is missing or malformed. """
if input_name is None: input_name = "images" input_info_dict = module_or_spec.get_input_info_dict(signature) try: shape = input_info_dict[input_name].get_shape() except KeyError: raise ValueError("Module is missing input '%s' in signature '%s'." % (input_name, signature or "default")) try: _, _, _, num_channels = shape.as_list() if num_channels is None: raise ValueError except ValueError: raise ValueError( "Shape of module input is %s, " "expected [batch_size, height, width, num_channels] " "with known num_channels" % shape) return num_channels
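For a standard RGB image module this returns 3. A quick sketch, assuming an illustrative tfhub.dev handle:

```python
import tensorflow_hub as hub

# The handle is illustrative; any image feature-vector module works.
module = hub.Module(
    "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1")
print(get_num_image_channels(module))  # 3 for standard RGB modules
```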
<SYSTEM_TASK:> Returns whether x is a SparseTensor or a parsed sparse tensor info. <END_TASK> <USER_TASK:> Description: def _is_sparse(x): """Returns whether x is a SparseTensor or a parsed sparse tensor info."""
return ( isinstance(x, (tf.SparseTensor, tf_v1.SparseTensorValue)) or (hasattr(x, "is_sparse") and x.is_sparse))
<SYSTEM_TASK:> Converts `value` into a tensor that can be fed into `tensor_info`. <END_TASK> <USER_TASK:> Description: def _convert_to_compatible_tensor(value, target, error_prefix): """Converts `value` into a tensor that can be fed into `tensor_info`.

  Args:
    value: A value to convert into Tensor or SparseTensor.
    target: An object returned by `parse_tensor_info_map`.
    error_prefix: A string to prefix on raised TypeErrors.

  Raises:
    TypeError: If it fails to convert.

  Returns:
    A Tensor or SparseTensor compatible with tensor_info.
  """
try: tensor = tf_v1.convert_to_tensor_or_indexed_slices(value, target.dtype) except TypeError as e: raise TypeError("%s: %s" % (error_prefix, e)) if _is_sparse(tensor) != _is_sparse(target): if _is_sparse(tensor): raise TypeError("%s: Is sparse. Expected dense." % error_prefix) else: raise TypeError("%s: Is dense. Expected sparse." % error_prefix) if not tensor.get_shape().is_compatible_with(target.get_shape()): raise TypeError("%s: Shape %r is incompatible with %r" % (error_prefix, tensor.get_shape(), target.get_shape())) return tensor
<SYSTEM_TASK:> Converts dict `values` into tensors that are compatible with `targets`. <END_TASK> <USER_TASK:> Description: def convert_dict_to_compatible_tensor(values, targets): """Converts dict `values` into tensors that are compatible with `targets`.

  Args:
    values: A dict of objects to convert, with the same keys as `targets`.
    targets: A dict returned by `parse_tensor_info_map`.

  Returns:
    A map with the same keys as `values` but values converted into
    Tensor/SparseTensors that can be fed into `targets`.

  Raises:
    TypeError: If it fails to convert.
  """
result = {} for key, value in sorted(values.items()): result[key] = _convert_to_compatible_tensor( value, targets[key], error_prefix="Can't convert %r" % key) return result
<SYSTEM_TASK:> Builds a map to feed tensors in `protomap` using `inputs`. <END_TASK> <USER_TASK:> Description: def build_input_map(protomap, inputs): """Builds a map to feed tensors in `protomap` using `inputs`.

  Args:
    protomap: A proto map<string,TensorInfo>.
    inputs: A map with same keys as `protomap` of Tensors and SparseTensors.

  Returns:
    A map from nodes referred to by TensorInfo protos to the corresponding
    input tensors.

  Raises:
    ValueError: if a TensorInfo proto is malformed or map keys do not match.
  """
if set(protomap.keys()) != set(inputs.keys()): raise ValueError("build_input_map: keys do not match.") input_map = {} for key, tensor_info in protomap.items(): arg = inputs[key] encoding = tensor_info.WhichOneof("encoding") if encoding == "name": input_map[tensor_info.name] = arg elif encoding == "coo_sparse": coo_sparse = tensor_info.coo_sparse input_map[coo_sparse.values_tensor_name] = arg.values input_map[coo_sparse.indices_tensor_name] = arg.indices input_map[coo_sparse.dense_shape_tensor_name] = arg.dense_shape else: raise ValueError("Invalid TensorInfo.encoding: %s" % encoding) return input_map
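A minimal sketch of the dense ("name" encoding) case; the tensor name and shape are made up:

```python
import tensorflow as tf
from tensorflow.core.protobuf import meta_graph_pb2

protomap = {"x": meta_graph_pb2.TensorInfo(name="input_tensor:0")}
placeholder = tf.placeholder(tf.float32, shape=[None, 4])
input_map = build_input_map(protomap, {"x": placeholder})
# input_map == {"input_tensor:0": placeholder}
```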
<SYSTEM_TASK:> Builds a map of tensors from `protomap` using `get_tensor_by_name`. <END_TASK> <USER_TASK:> Description: def build_output_map(protomap, get_tensor_by_name): """Builds a map of tensors from `protomap` using `get_tensor_by_name`. Args: protomap: A proto map<string,TensorInfo>. get_tensor_by_name: A lambda that receives a tensor name and returns a Tensor instance. Returns: A map from string to Tensor or SparseTensor instances built from `protomap` and resolving tensors using `get_tensor_by_name()`. Raises: ValueError: if a TensorInfo proto is malformed. """
def get_output_from_tensor_info(tensor_info): encoding = tensor_info.WhichOneof("encoding") if encoding == "name": return get_tensor_by_name(tensor_info.name) elif encoding == "coo_sparse": return tf.SparseTensor( get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name), get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name), get_tensor_by_name(tensor_info.coo_sparse.dense_shape_tensor_name)) else: raise ValueError("Invalid TensorInfo.encoding: %s" % encoding) return { key: get_output_from_tensor_info(tensor_info) for key, tensor_info in protomap.items() }
<SYSTEM_TASK:> Parses a line of a text embedding file. <END_TASK> <USER_TASK:> Description: def parse_line(line): """Parses a line of a text embedding file. Args: line: (str) One line of the text embedding file. Returns: A token string and its embedding vector in floats. """
columns = line.split() token = columns.pop(0) values = [float(column) for column in columns] return token, values
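For example, for one whitespace-separated row of a hypothetical embedding file:

```python
token, values = parse_line("cat 0.1 -0.2 0.3")
assert token == "cat"
assert values == [0.1, -0.2, 0.3]
```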
<SYSTEM_TASK:> Loads a text embedding into memory as a numpy matrix. <END_TASK> <USER_TASK:> Description: def load(file_path, parse_line_fn): """Loads a text embedding into memory as a numpy matrix.

  Args:
    file_path: Path to the text embedding file.
    parse_line_fn: callback function to parse each file line.

  Returns:
    A tuple of (list of vocabulary tokens, numpy matrix of embedding vectors).

  Raises:
    ValueError: if the data in the file is inconsistent.
  """
vocabulary = []
  embeddings = []
  embeddings_dim = None
  for line in tf.gfile.GFile(file_path):
    token, embedding = parse_line_fn(line)
    if not embeddings_dim:
      embeddings_dim = len(embedding)
    elif embeddings_dim != len(embedding):
      # Use %-interpolation; passing the values as extra ValueError
      # arguments would leave the message unformatted.
      raise ValueError(
          "Inconsistent embedding dimension detected, %d != %d for token %s" %
          (embeddings_dim, len(embedding), token))

    vocabulary.append(token)
    embeddings.append(embedding)

  return vocabulary, np.array(embeddings)
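A usage sketch with a hypothetical embedding file path:

```python
vocabulary, embeddings = load("/tmp/embeddings.txt", parse_line)
print(len(vocabulary), embeddings.shape)  # e.g. 10000 (10000, 300)
```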
<SYSTEM_TASK:> Makes a module spec to simply perform token to embedding lookups. <END_TASK> <USER_TASK:> Description: def make_module_spec(vocabulary_file, vocab_size, embeddings_dim, num_oov_buckets, preprocess_text): """Makes a module spec to simply perform token to embedding lookups. Input of this module is a 1-D list of string tokens. For T tokens input and an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor. Args: vocabulary_file: Text file where each line is a key in the vocabulary. vocab_size: The number of tokens contained in the vocabulary. embeddings_dim: The embedding dimension. num_oov_buckets: The number of out-of-vocabulary buckets. preprocess_text: Whether to preprocess the input tensor by removing punctuation and splitting on spaces. Returns: A module spec object used for constructing a TF-Hub module. """
def module_fn(): """Spec function for a token embedding module.""" tokens = tf.placeholder(shape=[None], dtype=tf.string, name="tokens") embeddings_var = tf.get_variable( initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]), name=EMBEDDINGS_VAR_NAME, dtype=tf.float32) lookup_table = tf.contrib.lookup.index_table_from_file( vocabulary_file=vocabulary_file, num_oov_buckets=num_oov_buckets, ) ids = lookup_table.lookup(tokens) combined_embedding = tf.nn.embedding_lookup(params=embeddings_var, ids=ids) hub.add_signature("default", {"tokens": tokens}, {"default": combined_embedding}) def module_fn_with_preprocessing(): """Spec function for a full-text embedding module with preprocessing.""" sentences = tf.placeholder(shape=[None], dtype=tf.string, name="sentences") # Perform a minimalistic text preprocessing by removing punctuation and # splitting on spaces. normalized_sentences = tf.regex_replace( input=sentences, pattern=r"\pP", rewrite="") tokens = tf.string_split(normalized_sentences, " ") embeddings_var = tf.get_variable( initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]), name=EMBEDDINGS_VAR_NAME, dtype=tf.float32) lookup_table = tf.contrib.lookup.index_table_from_file( vocabulary_file=vocabulary_file, num_oov_buckets=num_oov_buckets, ) sparse_ids = tf.SparseTensor( indices=tokens.indices, values=lookup_table.lookup(tokens.values), dense_shape=tokens.dense_shape) # In case some of the input sentences are empty before or after # normalization, we will end up with empty rows. We do however want to # return embedding for every row, so we have to fill in the empty rows with # a default. sparse_ids, _ = tf.sparse_fill_empty_rows( sparse_ids, lookup_table.lookup(tf.constant(""))) # In case all of the input sentences are empty before or after # normalization, we will end up with a SparseTensor with shape [?, 0]. After # filling in the empty rows we must ensure the shape is set properly to # [?, 1]. At this point, there are no empty rows, so the new shape will be # [sparse_ids.dense_shape[0], max(1, sparse_ids.dense_shape[1])]. sparse_ids = tf.sparse_reset_shape(sparse_ids) combined_embedding = tf.nn.embedding_lookup_sparse( params=embeddings_var, sp_ids=sparse_ids, sp_weights=None, combiner="sqrtn") hub.add_signature("default", {"sentences": sentences}, {"default": combined_embedding}) if preprocess_text: return hub.create_module_spec(module_fn_with_preprocessing) else: return hub.create_module_spec(module_fn)
<SYSTEM_TASK:> Exports a TF-Hub module that performs embedding lookups. <END_TASK> <USER_TASK:> Description: def export(export_path, vocabulary, embeddings, num_oov_buckets, preprocess_text): """Exports a TF-Hub module that performs embedding lookups.

  Args:
    export_path: Location to export the module.
    vocabulary: List of the N tokens in the vocabulary.
    embeddings: Numpy array of shape [N+K, M]; the first N rows are the
      M-dimensional embeddings for the respective tokens and the next K rows
      are for the K out-of-vocabulary buckets.
    num_oov_buckets: How many out-of-vocabulary buckets to add.
    preprocess_text: Whether to preprocess the input tensor by removing
      punctuation and splitting on spaces.
  """
# Write temporary vocab file for module construction. tmpdir = tempfile.mkdtemp() vocabulary_file = os.path.join(tmpdir, "tokens.txt") with tf.gfile.GFile(vocabulary_file, "w") as f: f.write("\n".join(vocabulary)) vocab_size = len(vocabulary) embeddings_dim = embeddings.shape[1] spec = make_module_spec(vocabulary_file, vocab_size, embeddings_dim, num_oov_buckets, preprocess_text) try: with tf.Graph().as_default(): m = hub.Module(spec) # The embeddings may be very large (e.g., larger than the 2GB serialized # Tensor limit). To avoid having them frozen as constant Tensors in the # graph we instead assign them through the placeholders and feed_dict # mechanism. p_embeddings = tf.placeholder(tf.float32) load_embeddings = tf.assign(m.variable_map[EMBEDDINGS_VAR_NAME], p_embeddings) with tf.Session() as sess: sess.run([load_embeddings], feed_dict={p_embeddings: embeddings}) m.export(export_path, sess) finally: shutil.rmtree(tmpdir)
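A sketch exporting a made-up two-token vocabulary; the third embedding row is the zero vector for the single OOV bucket, and the export path is a placeholder:

```python
import numpy as np

vocabulary = ["cat", "dog"]
embeddings = np.array([[0.1, 0.2], [0.3, 0.4], [0.0, 0.0]], dtype=np.float32)
export("/tmp/embedding_module", vocabulary, embeddings,
       num_oov_buckets=1, preprocess_text=False)
```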
<SYSTEM_TASK:> Adds zero vectors for oov buckets if num_oov_buckets > 0. <END_TASK> <USER_TASK:> Description: def maybe_append_oov_vectors(embeddings, num_oov_buckets): """Adds zero vectors for oov buckets if num_oov_buckets > 0.

  Since we are assigning zero vectors, adding more than one oov bucket is only
  meaningful if we perform fine-tuning.

  Args:
    embeddings: Embeddings to extend.
    num_oov_buckets: Number of OOV buckets in the extended embedding.
  """
num_embeddings = np.shape(embeddings)[0] embedding_dim = np.shape(embeddings)[1] embeddings.resize( [num_embeddings + num_oov_buckets, embedding_dim], refcheck=False)
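Because numpy's in-place `resize` zero-fills new elements, the appended OOV rows come out as zero vectors. A quick sketch with made-up data:

```python
import numpy as np

embeddings = np.array([[0.1, 0.2, 0.3],
                       [0.4, 0.5, 0.6]])
maybe_append_oov_vectors(embeddings, num_oov_buckets=1)
print(embeddings.shape)  # (3, 3); the new last row is all zeros
```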
<SYSTEM_TASK:> Register a Module to be exported under `export_name`. <END_TASK> <USER_TASK:> Description: def register_module_for_export(module, export_name): """Register a Module to be exported under `export_name`. This function registers `module` to be exported by `LatestModuleExporter` under a subdirectory named `export_name`. Note that `export_name` must be unique for each module exported from the current graph. It only controls the export subdirectory name and it has no scope effects such as the `name` parameter during Module instantiation. Args: module: Module instance to be exported. export_name: subdirectory name to use when performing the export. Raises: ValueError: if `export_name` is already taken in the current graph. """
for used_name, _ in tf_v1.get_collection(_EXPORT_MODULES_COLLECTION): if used_name == export_name: raise ValueError( "There is already a module registered to be exported as %r" % export_name) tf_v1.add_to_collection(_EXPORT_MODULES_COLLECTION, (export_name, module))
<SYSTEM_TASK:> Returns a session constructed using `estimator` and `serving_input_fn`. <END_TASK> <USER_TASK:> Description: def _make_estimator_serving_session(estimator, serving_input_fn, checkpoint_path): """Returns a session constructed using `estimator` and `serving_input_fn`. The Estimator API does not provide an API to construct a graph and session, making it necessary for this function to replicate how an estimator builds a graph. This code is based on `Estimator.export_savedmodel` (another function that has to replicate how an estimator builds a graph). Args: estimator: tf.Estimator to use when constructing the session. serving_input_fn: A function that takes no arguments and returns a `ServingInputReceiver`. It is used to construct the session. checkpoint_path: The checkpoint path to restore in the session. Must not be None. """
with tf.Graph().as_default() as g: mode = tf_v1.estimator.ModeKeys.PREDICT tf_v1.train.create_global_step(g) tf_v1.set_random_seed(estimator.config.tf_random_seed) serving_input_receiver = serving_input_fn() estimator_spec = estimator.model_fn( features=serving_input_receiver.features, labels=None, mode=mode, config=estimator.config) # pylint: disable=protected-access # Note that MonitoredSession(), despite the name is not a Session, and # can't be used to export Modules as one can't use them with Savers. # As so this must use a raw tf.Session(). session = tf_v1.Session(config=estimator._session_config) # pylint: enable=protected-access with session.as_default(): # TODO(b/71839662): Consider if this needs to support TPUEstimatorSpec # which does not have a scaffold member. saver_for_restore = estimator_spec.scaffold.saver or tf_v1.train.Saver( sharded=True) saver_for_restore.restore(session, checkpoint_path) return session
<SYSTEM_TASK:> Creates a ModuleSpec from a function that builds the module's graph. <END_TASK> <USER_TASK:> Description: def create_module_spec(module_fn, tags_and_args=None, drop_collections=None): """Creates a ModuleSpec from a function that builds the module's graph. The `module_fn` is called on a new graph (not the current one) to build the graph of the module and define its signatures via `hub.add_signature()`. Example: ```python # Define a text embedding module. def my_text_module_fn(): text_input = tf.placeholder(dtype=tf.string, shape=[None]) embeddings = compute_embedding(text_input) hub.add_signature(inputs=text_input, outputs=embeddings) ``` See `add_signature()` for documentation on adding multiple input/output signatures. NOTE: In anticipation of future TF-versions, `module_fn` is called on a graph that uses resource variables by default. If you want old-style variables then you can use `with tf.variable_scope("", use_resource=False)` in `module_fn`. Multiple graph variants can be defined by using the `tags_and_args` argument. For example, the code: ```python hub.create_module_spec( module_fn, tags_and_args=[({"train"}, {"is_training":True}), (set(), {"is_training":False})]) ``` calls `module_fn` twice, once as `module_fn(is_training=True)` and once as `module_fn(is_training=False)` to define the respective graph variants: for training with tags {"train"} and for inference with the empty set of tags. Using the empty set aligns the inference case with the default in Module.__init__(). Args: module_fn: a function to build a graph for the Module. tags_and_args: Optional list of tuples (tags, kwargs) of tags and keyword args used to define graph variants. If omitted, it is interpreted as [(set(), {})], meaning `module_fn` is called once with no args. drop_collections: list of collection to drop. Returns: A ModuleSpec. Raises: ValueError: if it fails to construct the ModuleSpec due to bad or unsupported values in the arguments or in the graphs constructed by `module_fn`. """
if not drop_collections: drop_collections = [] report_tags = True if not tags_and_args: tags_and_args = [(set(), {})] report_tags = False saved_model_handler = saved_model_lib.SavedModelHandler() for tags, args in tags_and_args: with tf.Graph().as_default() as graph: with tf_v1.variable_scope("", use_resource=True): module_fn(**args) for collection_key in drop_collections: del tf_v1.get_collection_ref(collection_key)[:] err = find_state_op_colocation_error(graph, tags if report_tags else None) if err: raise ValueError(err) saved_model_handler.add_graph_copy(graph, tags=tags) return _ModuleSpec(saved_model_handler, checkpoint_variables_path=None)
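A sketch of turning the `my_text_module_fn` from the docstring above into an exported module (assuming `compute_embedding` is filled in; the export path is a placeholder):

```python
import tensorflow as tf
import tensorflow_hub as hub

spec = hub.create_module_spec(my_text_module_fn)
with tf.Graph().as_default():
  module = hub.Module(spec)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    module.export("/tmp/my_text_module", sess)
```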
<SYSTEM_TASK:> Adds a signature to the module definition. <END_TASK> <USER_TASK:> Description: def add_signature(name=None, inputs=None, outputs=None): """Adds a signature to the module definition. NOTE: This must be called within a `module_fn` that is defining a Module. Args: name: Signature name as a string. If omitted, it is interpreted as 'default' and is the signature used when `Module.__call__` `signature` is not specified. inputs: A dict from input name to Tensor or SparseTensor to feed when applying the signature. If a single tensor is passed, it is interpreted as a dict with a single 'default' entry. outputs: A dict from output name to Tensor or SparseTensor to return from applying the signature. If a single tensor is passed, it is interpreted as a dict with a single 'default' entry. Raises: ValueError: if the arguments are invalid. """
if not name: name = "default" if inputs is None: inputs = {} if outputs is None: outputs = {} if not isinstance(inputs, dict): inputs = {"default": inputs} if not isinstance(outputs, dict): outputs = {"default": outputs} message = find_signature_inputs_from_multivalued_ops(inputs) if message: logging.error(message) message = find_signature_input_colocation_error(name, inputs) if message: raise ValueError(message) saved_model_lib.add_signature(name, inputs, outputs)
<SYSTEM_TASK:> Adds an attached message to the module definition. <END_TASK> <USER_TASK:> Description: def attach_message(key, message): """Adds an attached message to the module definition. NOTE: This must be called within a `module_fn` that is defining a Module. See ModuleSpec.get_attached_message() for an introduction to attached messages and the API for module consumers. To define a new type of attached message: * Select a reasonably descriptive name as a unique key. For now, keys must be valid Python identifiers that start with a letter. Punctuation besides underscores ('_') is reserved for future use in hierarchical names. * Define a Protocol Buffer message type to store the value for the key. (Use generic containers like google.protobuf.Value only if running the protocol compiler is infeasible for your build process.) * For module consumers, consider providing a small library that encapsulates the specific call to get_attached_message() behind a higher-level interface and supplies the right message type for parsing. Attached messages work best for few messages of moderate size. Avoid a large number of messages; use repetition within messages instead. Avoid large messages (megabytes); consider module assets instead. For modules with multiple graph versions, each graph version stores separately what was attached from within the call to `module_fn` that defines its graph. Args: key: A string with the unique key to retrieve this message. Must start with a letter and contain only letters, digits and underscores. If used repeatedly within one invocation of `module_fn`, then only the message from the final call will be returned by `get_attached_message()`. message: A protocol message object, to be stored in serialized form. Raises: ValueError: if `key` is not a string of the form of a Python identifier. """
if not re.match(r"[a-zA-Z][a-zA-Z0-9_]*$", key): raise ValueError( "hub.attach_message() called with malformed key '%s'" % key) saved_model_lib.attach_bytes(key, message.SerializeToString())
<SYSTEM_TASK:> Returns set of registered stateful ops that do not expect inputs. <END_TASK> <USER_TASK:> Description: def list_registered_stateful_ops_without_inputs(): """Returns set of registered stateful ops that do not expect inputs. This list is used to identify the ops to be included in the state-graph and that are subsequently fed into the apply-graphs. Returns: A set of strings. """
return set([ name for name, op in op_def_registry.get_registered_ops().items() if op.is_stateful and not op.input_arg ])
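On a stock TensorFlow 1.x build, variable and table ops are the typical members of this set, for example:

```python
state_ops = list_registered_stateful_ops_without_inputs()
print("VariableV2" in state_ops)        # True: stateful, no inputs
print("HashTableV2" in state_ops)       # True: stateful, no inputs
print("AssignVariableOp" in state_ops)  # False: stateful, but takes inputs
```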
<SYSTEM_TASK:> Returns a map from tensor names to tensors that hold the state. <END_TASK> <USER_TASK:> Description: def get_state_map(meta_graph, state_ops, unsupported_state_ops, get_tensor_by_name): """Returns a map from tensor names to tensors that hold the state."""
state_map = {} for node in meta_graph.graph_def.node: if node.op in state_ops: tensor_name = node.name + ":0" tensor = get_tensor_by_name(tensor_name) num_outputs = len(tensor.op.outputs) if num_outputs != 1: raise ValueError("Stateful op %s has %d outputs, expected 1" % (node.op, num_outputs)) state_map[tensor_name] = tensor if node.op in unsupported_state_ops: raise ValueError("Unsupported stateful op: %s" % node.op) return state_map
<SYSTEM_TASK:> Replaces state ops with non state Placeholder ops for the apply graph. <END_TASK> <USER_TASK:> Description: def replace_apply_state(meta_graph, state_ops, feed_map): """Replaces state ops with non state Placeholder ops for the apply graph."""
for node in meta_graph.graph_def.node:
    keys_to_purge = []
    tensor_name = node.name + ":0"
    # Verify that the node is a state op and that it's due to be rewired
    # in the feed_map.
    if node.op in state_ops and tensor_name in feed_map:
      node.op = "Placeholder"
      for key in node.attr:
        # Only shape and dtype are required for Placeholder. Remove other
        # attributes.
        if key != "shape":
          keys_to_purge.append(key)
      for key in keys_to_purge:
        del node.attr[key]
      node.attr["dtype"].type = types_pb2.DT_RESOURCE
<SYSTEM_TASK:> Matches a variable to individual parts. <END_TASK> <USER_TASK:> Description: def _extract_variable_parts(variable_key, variable): """Matches a variable to individual parts. Args: variable_key: String identifier of the variable in the module scope. variable: Variable tensor. Returns: partitioned: Whether the variable is partitioned. name: Name of the variable up to the partitioning. offset: Offset of the variable into the full variable. Raises: RuntimeError: In case of unexpected variable format. """
name, offset, partitioned = None, None, False # pylint: disable=protected-access if variable._save_slice_info: name = variable_key[:variable_key.rfind("/")] if not variable._save_slice_info.full_name.endswith(name): raise RuntimeError("Unexpected handling of partitioned variable.") offset = variable._save_slice_info.var_offset[0] partitioned = True # pylint: enable=protected-access return partitioned, name, offset
<SYSTEM_TASK:> Builds a proper variable map if it contains PartitionedVariables. <END_TASK> <USER_TASK:> Description: def recover_partitioned_variable_map(var_node_map): """Builds a proper variable map if it contains PartitionedVariables. Args: var_node_map: A map to tf.Variables. PartitionedVariables show up in this map as N entries with keys "<var_name>/part_n". Returns: A map to tf.Variables or to list of tf.Variables for each PartitionedVariables in `var_node_map`. Raises: RuntimeError: if there are issues recovering the PartitionedVariables. """
offset_variables_map = {}
  for var_key, var_tensor in var_node_map.items():
    match, var_name, offset = _extract_variable_parts(var_key, var_tensor)

    if not match:
      # This is a standard variable, so we can safely add it to the output.
      if var_key in offset_variables_map:
        raise RuntimeError(
            "Variable %s exists both as a single and partitioned variable."
            % var_key)
      offset_variables_map[var_key] = var_tensor
      continue

    if var_name not in offset_variables_map:
      offset_variables_map[var_name] = {}
    elif not isinstance(offset_variables_map[var_name], dict):
      raise RuntimeError(
          "Variable %s exists both as a single and partitioned variable."
          % var_name)

    # Duplicated variable offsets should not exist.
    if offset in offset_variables_map[var_name]:
      raise RuntimeError(
          "Variable map contains duplicate offset %d for variable [%s]" %
          (offset, var_name))
    offset_variables_map[var_name][offset] = var_tensor

  variables_map = {}
  # Use offsets for sorting, then strip them from the dictionary and keep only
  # a list of variables per each variable name.
  for var_name, var_value in offset_variables_map.items():
    if not isinstance(var_value, dict):
      variables_map[var_name] = var_value
      continue
    shapes = [var_tensor.shape[1:] for var_tensor in var_value.values()]
    if not all(shape == shapes[0] for shape in shapes):
      raise RuntimeError("Shapes not compatible: %s" % (shapes))
    variables_map[var_name] = [
        tensor for _, tensor in sorted(var_value.items())
    ]

  return variables_map
<SYSTEM_TASK:> Checks that tag list contains each set of tags only once. <END_TASK> <USER_TASK:> Description: def check_unique_tags(tag_list): """Checks that tag list contains each set of tags only once."""
frozen_tags_seen = set() for tags in tag_list: frozen_tags = frozenset(tags) if frozen_tags in frozen_tags_seen: raise ValueError("Tags %r used repeatedly" % tags) frozen_tags_seen.add(frozen_tags)
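Since each tag set is frozen before comparison, ordering within a set is irrelevant:

```python
check_unique_tags([set(), {"train"}])                     # passes: distinct sets
check_unique_tags([{"train", "gpu"}, {"gpu", "train"}])   # raises ValueError
```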
<SYSTEM_TASK:> Checks that SavedModelHandler only uses supported collections. <END_TASK> <USER_TASK:> Description: def check_collections_are_supported(saved_model_handler, supported): """Checks that SavedModelHandler only uses supported collections."""
for meta_graph in saved_model_handler.meta_graphs: used_collection_keys = set(meta_graph.collection_def.keys()) unsupported = used_collection_keys - supported if unsupported: raise ValueError("Unsupported collections in graph: %s\n" "Use hub.create_module_spec(..., drop_collections=[...])" " as appropriate." % list(unsupported))
<SYSTEM_TASK:> Register graph ops absent in op_def_registry, if present in c++ registry. <END_TASK> <USER_TASK:> Description: def register_ops_if_needed(graph_ops): """Register graph ops absent in op_def_registry, if present in c++ registry. Args: graph_ops: set with graph op names to register. Raises: RuntimeError: if `graph_ops` contains ops that are not in either python or c++ registry. """
missing_ops = graph_ops - set(op_def_registry.get_registered_ops().keys())

  if not missing_ops:
    return

  p_buffer = c_api.TF_GetAllOpList()
  cpp_op_list = op_def_pb2.OpList()
  cpp_op_list.ParseFromString(c_api.TF_GetBuffer(p_buffer))
  cpp_registry_ops = {op.name: op for op in cpp_op_list.op}

  missing_op_list = op_def_pb2.OpList()
  for missing_op in missing_ops:
    if missing_op not in cpp_registry_ops:
      logging.info(
          "Op %s is missing from both the python and C++ registry.",
          missing_op)
    else:
      missing_op_list.op.extend([cpp_registry_ops[missing_op]])
      logging.info(
          "Adding op %s from c++ registry to python registry.",
          missing_op)

  op_def_registry.register_op_list(missing_op_list)

  # Note: Only raise the missing-op RuntimeError after trying to load the ops.
  # This allows the test to exercise all the calls into TensorFlow
  # without having to write a combined C++ and python test.
  if not missing_ops <= set(cpp_registry_ops.keys()):
    raise RuntimeError(
        "Graph ops missing from the python registry (%s) are also absent from "
        "the c++ registry."
        % missing_ops.difference(set(cpp_registry_ops.keys())))
<SYSTEM_TASK:> Fixes colocation attributes after import according to input_map. <END_TASK> <USER_TASK:> Description: def fix_colocation_after_import(input_map, absolute_import_scope): """Fixes colocation attributes after import according to input_map. This function is meant to be called after importing a GraphDef, in order to rewrite colocate_with constrains analogous to how inputs to ops are rewritten by input_map during import. It also updates devices accordingly. The nodes in the given import scope of the current default graph have their colocation attributes (that is, the "loc:@..." values in the "_class" attr) rewritten as follows: If, before the call, op x has attribute loc:@y, and `input_map` replaces an output of y with an output of z, then loc:@y gets replaced by the colocation attributes of z (that is, loc:@z, if no other constraints are in play). This style of rewriting imposes the following requirements: * If an output of node y is an input tensor in a signature of the module, y must not have any colocation attributes on it, such that colocations with y are expressed by loc:@y and can be adjusted with a rewriting rule for it. Function `find_signature_input_colocation_error()` checks this during module creation. * If y1 is a state node, its colocation constraints must only reference other state nodes, say, y2. Since all outputs of state nodes are mapped the same way, all their rewriting rules together will do the same thing. Function `find_state_op_colocation_error()` checks this during module creation. * Other nodes may have arbitrary colocation attributes. Mapping of inputs works with tensors, while colocation constraints work with ops. Issues may arise when mapping tensors from ops with multiple outputs. If the outputs of y are replaced by outputs of distinct ops z1, z2, ..., rewriting of loc:@y becomes ambiguous unless z1, z2, ... have equal colocation_groups) If some but not all outputs of y are replaced, it becomes ambiguous whether to rewrite loc:@y at all. For now, this is handled conservatively by raising an error (instead of rewriting to the union of all applicable constraints). This should be very rare: all state ops so far have single outputs (and even if not, the rewriting would be consistent); input ops usually are placeholders, which have single outputs. Args: input_map: a dict mapping from tensor names in the imported graph to existing Tensors, typically the same as passed to tf.import_graph_def(). absolute_import_scope: a string with the full name of the import scope, comprising the current scope when import_graph_def() as called plus the import_scope passed to it. Raises: ValueError: if one imported op has its multiple outputs and they are remapped in a way that causes conflicting colocation rewrites. """
attr_map = _build_colocation_attr_map(input_map, absolute_import_scope) _apply_colocation_attr_map(attr_map, absolute_import_scope)
<SYSTEM_TASK:> Returns a dict mapping from pre-import to post-import colocation attrs. <END_TASK> <USER_TASK:> Description: def _build_colocation_attr_map(input_map, absolute_import_scope): """Returns a dict mapping from pre-import to post-import colocation attrs. Args: input_map: as for fix_colocation_after_import. absolute_import_scope: as for fix_colocation_after_import. Returns: A dict that maps bytes `"loc:@" + absolute_import_scope + "/foo"` to _ConsistentValues set to the lists of bytes `["loc:@...", ...]` according to the rewriting scheme of fix_colocation_after_import. In case of an inconsistent rewriting, _ConsistentValue.has_error is true. """
colocation_attr_map = collections.defaultdict(_ConsistentValue) used_outputs_of_imported_ops = collections.defaultdict(set) # Collect mappings from the input_map. for imported_tensor_name, mapped_tensor in input_map.items(): imported_tensor_name = absolute_import_scope + "/" + imported_tensor_name imported_op_name, imported_index = _split_tensor_name(imported_tensor_name) key = tf.compat.as_bytes("loc:@" + imported_op_name) colocation_attr_map[key].Set( mapped_tensor.op.colocation_groups(), {"reason": "input '%s' is substituted by '%s'" % ( imported_tensor_name, mapped_tensor.name)}) used_outputs_of_imported_ops[imported_op_name].add(imported_index) # Add unchanged mappings for additional, non-remapped outputs of ops touched # by the input_map. For now, these just signal inconsistency when used. for imported_op_name, used_outputs in used_outputs_of_imported_ops.items(): imported_op = tf_v1.get_default_graph().get_operation_by_name( imported_op_name) unused_outputs = set(range(len(imported_op.outputs))) - used_outputs if not unused_outputs: continue key = tf.compat.as_bytes("loc:@" + imported_op_name) if imported_op.colocation_groups() != [key]: # This should never happen: state nodes are remapped fully, input nodes # are prevented from having colocation attributes. raise ValueError( "Internal error: tensors from op '%s' are partially remapped in " "import but op.colocation_groups=%s cannot be captured in a " "simple rewrite rule." % (imported_op_name, imported_op.colocation_groups())) colocation_attr_map[key].Set( [key], {"reason": "tensor '%s:%s' is not substituted by inputs" % ( imported_op_name, ",".join(str(i) for i in sorted(unused_outputs)))}) return colocation_attr_map
<SYSTEM_TASK:> Rewrites colocation constraints in the current default graph. <END_TASK> <USER_TASK:> Description: def _apply_colocation_attr_map(colocation_attr_map, absolute_import_scope): """Rewrites colocation constraints in the current default graph. Nodes in `absolute_import_scope` get their "_class" attr lists rewritten according to `colocation_attr_map`: each entry that matches a key gets replaced by the associated values (with deduplication). The node's device is updated accordingly. Args: colocation_attr_map: as returned by _build_colocation_attr_map. absolute_import_scope: as for fix_colocation_after_import. Raises: ValueError: if rewriting runs into an inconsistent value in `colocation_attr_map`. """
graph = tf_v1.get_default_graph() for op in graph.get_operations(): # Rewrite the values of the "_class" attr that store colocation constraints. # NOTE: The colocation_group loc:@X of a node with itself is not stored # explicitly as an attr, so rewrite errors for loc:@X are not triggered # by the mere existence of X. if not op.name.startswith(absolute_import_scope + "/"): continue try: class_values = op.get_attr("_class") except ValueError: continue # No _class attr found; nothing to do. new_attr_value = tf_v1.AttrValue() new_coloc_groups = [] for class_value in class_values: if class_value.startswith(tf.compat.as_bytes("loc:@")): if class_value not in colocation_attr_map: rewritten_class_value = [class_value] else: rewritten_class_value = (colocation_attr_map[ class_value].GetConsistentValueOrRaise( "Failed to rewrite colocation constraints while applying " "hub.Module:\n" "The module graph contains a node {op!r} " "that has a colocation constraint {class_value!r} " "with ambiguous rewriting {old_value!r} vs {new_value!r} " "because {old_reason} and {new_reason}, respectively.\n" "To fix, avoid publishing a module with inputs comprising " "multiple outputs of one op that is referenced in " "tf.colocate_with(...) constraints on other ops.", {"op": op.name, "class_value": class_value})) new_coloc_groups.extend(rewritten_class_value) else: new_attr_value.list.s.append(class_value) new_coloc_groups = sorted(set(new_coloc_groups)) new_attr_value.list.s.extend(new_coloc_groups) op._set_attr("_class", new_attr_value) # pylint: disable=protected-access # Mimic the code of tf.import_graph_def(): If there are colocation # constraints, use any of them to set the device (overriding what the # device function stack would do), without attempting to merge or check for # equality. If they were inconsistent, TensorFlow's C++ runtime would fail # anyways due to conflicting colocation constraints. # Note that Hub imports GraphDefs with devices cleared, so this code deals # with the result of import_graph_def, not a setting saved in the module. if new_coloc_groups: new_coloc_device = "" for new_coloc_group in new_coloc_groups: assert new_coloc_group.startswith(tf.compat.as_bytes("loc:@")) new_coloc_target_op = graph.get_operation_by_name( tf.compat.as_str_any(new_coloc_group[5:])) new_coloc_device = new_coloc_target_op.device if new_coloc_device: break # Set this, even if empty, to avoid retaining an outdated value. op._set_device(new_coloc_device)
<SYSTEM_TASK:> Returns error message for colocation of state ops, or None if ok. <END_TASK> <USER_TASK:> Description: def find_state_op_colocation_error(graph, reported_tags=None): """Returns error message for colocation of state ops, or None if ok."""
state_op_types = list_registered_stateful_ops_without_inputs() state_op_map = {op.name: op for op in graph.get_operations() if op.type in state_op_types} for op in state_op_map.values(): for colocation_group in op.colocation_groups(): if not (colocation_group.startswith(tf.compat.as_bytes("loc:@")) and tf.compat.as_str_any(colocation_group[5:]) in state_op_map): tags_prefix = ("" if reported_tags is None else "in the graph for tags %s, " % reported_tags) return ( "A state-holding node x of a module's graph (e.g., a Variable op) " "must not be subject to a tf.colocate_with(y) constraint " "unless y is also a state-holding node.\n" "Details: %snode '%s' has op '%s', which counts as state-holding, " "but Operation.colocation_groups() == %s. " % (tags_prefix, op.name, op.type, op.colocation_groups())) return None
<SYSTEM_TASK:> Returns error message for colocation of signature inputs, or None if ok. <END_TASK> <USER_TASK:> Description: def find_signature_input_colocation_error(signature_name, inputs): """Returns error message for colocation of signature inputs, or None if ok."""
for input_name, tensor in inputs.items(): expected_colocation_groups = [tf.compat.as_bytes("loc:@" + tensor.op.name)] if tensor.op.colocation_groups() != expected_colocation_groups: return ( "A tensor x used as input in a signature must not be subject to a " "tf.colocate_with(y) constraint. (The reverse would be allowed.)\n" "Details: tensor '%s' appears as input '%s' of signature '%s' " "but has Tensor.op.colocation_groups() == %s" % (tensor, input_name, signature_name, tensor.op.colocation_groups())) return None
<SYSTEM_TASK:> Returns error message for module inputs from ops with multiple outputs. <END_TASK> <USER_TASK:> Description: def find_signature_inputs_from_multivalued_ops(inputs): """Returns error message for module inputs from ops with multiple outputs."""
dense_inputs = [] # List of (str, Tensor), with SparseTensors decomposed. for name, tensor in sorted(inputs.items()): if isinstance(tensor, tf.SparseTensor): dense_inputs.extend(("%s.%s" % (name, attr), getattr(tensor, attr)) for attr in ("indices", "values", "dense_shape")) else: dense_inputs.append((name, tensor)) warnings = [(name, tensor.name) for name, tensor in dense_inputs if len(tensor.op.outputs) != 1] if warnings: return ( "WARNING: The inputs declared in hub.add_signature() should be tensors " "from ops with a single output, or else uses of tf.colocate_with() on " "that op can trigger fatal errors when the module is applied and " "colocation constraints have to be rewritten.\nAffected inputs: %s" % ", ".join("%s='%s'" % pair for pair in warnings)) return None
<SYSTEM_TASK:> Creates the graph nodes that hold the state of the Module. <END_TASK> <USER_TASK:> Description: def _create_state_graph(self, name): """Creates the graph nodes that hold the state of the Module. Args: name: name scope to create the state graph in. Returns: A tuple consisting of: variables_tensor_map: a map from tensor names in the original graph def to the created Variables objects. state_map: a map from tensors names in the original graph def to the instantiated tensors to be used as a state_map. """
import_collections = [
        tf_v1.GraphKeys.GLOBAL_VARIABLES,
        tf_v1.GraphKeys.MODEL_VARIABLES,
        tf_v1.GraphKeys.TABLE_INITIALIZERS,
        tf_v1.GraphKeys.ASSET_FILEPATHS,  # Typically used to initialize tables.
        tf_v1.GraphKeys.COND_CONTEXT,
        tf_v1.GraphKeys.WHILE_CONTEXT,
    ]
    if self._trainable:
      # TODO(b/64049014): Import UPDATE_OPS which do not depend on inputs.
      import_collections.extend([tf_v1.GraphKeys.TRAINABLE_VARIABLES,
                                 tf_v1.GraphKeys.REGULARIZATION_LOSSES])

    absolute_scope_name = tf_v1.get_default_graph().unique_name(
        name, mark_as_used=False)
    relative_scope_name = absolute_scope_name.split("/")[-1]
    assert relative_scope_name == name  # verify name scope was indeed unused.

    meta_graph = meta_graph_pb2.MetaGraphDef()
    meta_graph.CopyFrom(self._meta_graph)

    meta_graph_lib.filter_collections(meta_graph, import_collections)
    meta_graph_lib.prefix_shared_name_attributes(meta_graph,
                                                 absolute_scope_name)

    tf_v1.train.import_meta_graph(
        meta_graph,
        input_map={},
        import_scope=relative_scope_name)

    # Build a map from the variable name in the module definition to the actual
    # instantiated variables.
    variables_tensor_map = {}
    for var in tf_v1.global_variables():
      if var.op.name.startswith(absolute_scope_name + "/"):
        variables_tensor_map[var.name[len(absolute_scope_name)+1:]] = var

    # Build a map of tensors to feed from the state-graph into subsequent
    # apply-graphs.
    def _get_tensor(tensor_name):
      return tf_v1.get_default_graph().get_tensor_by_name(
          meta_graph_lib.prepend_name_scope(
              tensor_name, import_scope=absolute_scope_name))

    state_op_names = list_registered_stateful_ops_without_inputs()
    state_map = get_state_map(meta_graph, state_op_names, set(),
                              _get_tensor)

    return variables_tensor_map, state_map
<SYSTEM_TASK:> Receives a value for the object and some context on its source. <END_TASK> <USER_TASK:> Description: def Set(self, value, context=None): """Receives a value for the object and some context on its source."""
if self.has_error: return if self.value is None: self.value = value self._context["old_value"] = value self._context.update({"old_" + k: v for k, v in context.items()}) elif self.value != value: self.has_error = True self._context["new_value"] = value self._context.update({"new_" + k: v for k, v in context.items()})
<SYSTEM_TASK:> Gets consistent value or raises ValueError with formatted contexts. <END_TASK> <USER_TASK:> Description: def GetConsistentValueOrRaise(self, error_format, context=None): """Gets consistent value or raises ValueError with formatted contexts."""
if self.has_error: full_context = dict(self._context) if context: full_context.update(context) raise ValueError(error_format.format(**full_context)) return self.value
<SYSTEM_TASK:> Returns the directory where to cache the module. <END_TASK> <USER_TASK:> Description: def _module_dir(handle): """Returns the directory where to cache the module."""
cache_dir = resolver.tfhub_cache_dir(use_temp=True) return resolver.create_local_module_dir( cache_dir, hashlib.sha1(handle.encode("utf8")).hexdigest())
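In other words, the cache subdirectory name is just the SHA-1 hex digest of the module handle. A standalone sketch (the handle URL is illustrative):

```python
import hashlib

def module_cache_key(handle):
    # Cache subdirectory name: SHA-1 of the UTF-8 encoded handle.
    return hashlib.sha1(handle.encode("utf8")).hexdigest()

print(module_cache_key("https://tfhub.dev/google/nnlm-en-dim128/1"))
```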
<SYSTEM_TASK:> Returns the path for storing variables checkpoints. <END_TASK> <USER_TASK:> Description: def get_variables_path(export_dir): """Returns the path for storing variables checkpoints."""
return os.path.join( tf.compat.as_bytes(export_dir), tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_DIRECTORY), tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_FILENAME))
<SYSTEM_TASK:> Adds a signature to current graph. <END_TASK> <USER_TASK:> Description: def add_signature(key, inputs, outputs): """Adds a signature to current graph. Args: key: Signature key as a string. inputs: Signature inputs as a map from string to Tensor or SparseTensor. outputs: Signature outputs as a map from string to Tensor or SparseTensor. (Recall that a Variable is not a Tensor, but Variable.value() is.) Raises: TypeError: if the arguments have the wrong types. """
_check_dict_maps_to_tensors_or_sparse_tensors(inputs) _check_dict_maps_to_tensors_or_sparse_tensors(outputs) input_info = { input_name: tf_v1.saved_model.utils.build_tensor_info(tensor) for input_name, tensor in inputs.items() } output_info = { output_name: tf_v1.saved_model.utils.build_tensor_info(tensor) for output_name, tensor in outputs.items() } signature = tf_v1.saved_model.signature_def_utils.build_signature_def( input_info, output_info) tf_v1.add_to_collection(_SIGNATURE_COLLECTION, (key, signature))
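For context, here is a minimal sketch of how a module publisher would reach this code through the public TF1-style hub API (this assumes a hub version with TF1 APIs enabled; the toy half-plus-two module is an illustration, not from the source):

```python
import tensorflow as tf
import tensorflow_hub as hub

def module_fn():
    # Toy half-plus-two module with a single 'default' signature.
    x = tf.compat.v1.placeholder(tf.float32, shape=[None], name="x")
    y = x / 2.0 + 2.0
    hub.add_signature(inputs={"x": x}, outputs={"y": y})

spec = hub.create_module_spec(module_fn)
```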
<SYSTEM_TASK:> Adds a ModuleAttachment to the current graph. <END_TASK> <USER_TASK:> Description: def attach_bytes(key, the_bytes): """Adds a ModuleAttachment to the current graph. Args: key: A string with the unique key of the attachment. the_bytes: A bytes object with the serialized attachment. """
tf_v1.add_to_collection( _ATTACHMENT_COLLECTION_INTERNAL, module_attachment_pb2.ModuleAttachment(key=key, value=the_bytes))
<SYSTEM_TASK:> Exports ModuleAttachments from the current tf.Graph into `meta_graph`. <END_TASK> <USER_TASK:> Description: def _export_module_attachments(meta_graph): """Exports ModuleAttachments from the current tf.Graph into `meta_graph`."""
added_attachments = tf_v1.get_collection(_ATTACHMENT_COLLECTION_INTERNAL) if not added_attachments: return # Don't touch `meta_graph`. unique_attachments = collections.OrderedDict( # Avoid indeterminism. (attachment.key, attachment) for attachment in added_attachments) meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED].bytes_list.value[:] = [ attachment.SerializeToString() for attachment in unique_attachments.values()]
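The OrderedDict construction above deduplicates attachments by key: the last value per key wins, while the position of the first insertion is kept. A quick standalone illustration:

```python
import collections

pairs = [("vocab", b"v1"), ("config", b"{}"), ("vocab", b"v2")]
unique = collections.OrderedDict(pairs)
print(list(unique.items()))
# [('vocab', b'v2'), ('config', b'{}')] -- last value per key, first position.
```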
<SYSTEM_TASK:> Returns the dict of ModuleAttachments stored in `meta_graph`. <END_TASK> <USER_TASK:> Description: def get_attached_bytes_map(meta_graph): """Returns the dict of ModuleAttachments stored in `meta_graph`. Args: meta_graph: A MetaGraphDef, as built by SavedModelHandler.add_graph_copy() from some graph. Returns: A dict, containing the `(key, bytes)` items passed to `attach_bytes()` when the graph had been built. Raises: ValueError: if `meta_graph` is malformed. """
result = {} if ATTACHMENT_COLLECTION_SAVED not in meta_graph.collection_def: return result collection_def = meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED] if collection_def.WhichOneof("kind") != "bytes_list": raise ValueError( "Internal CollectionDef for attached messages has kind %s, " "expected bytes_list" % collection_def.WhichOneof("kind")) attachment = module_attachment_pb2.ModuleAttachment() for value in collection_def.bytes_list.value: attachment.ParseFromString(value) result[attachment.key] = attachment.value # Immutable; needs no copy. return result
<SYSTEM_TASK:> Raises TypeError if `node_def` does not match the expectations. <END_TASK> <USER_TASK:> Description: def _check_asset_node_def(node_def): """Raises TypeError if `node_def` does not match the expectations."""
if node_def.op != "Const": raise TypeError("Asset node must be of type constant.") if tf.as_dtype(node_def.attr["dtype"].type) != tf.string: raise TypeError("Asset node must be of dtype string.") if len(node_def.attr["value"].tensor.string_val) != 1: raise TypeError("Asset node must be a scalar.")
<SYSTEM_TASK:> Merges the ASSETS_KEY collection into the GraphDefs in saved_model_proto. <END_TASK> <USER_TASK:> Description: def _merge_assets_key_collection(saved_model_proto, path): """Merges the ASSETS_KEY collection into the GraphDefs in saved_model_proto. Removes the ASSETS_KEY collection from the GraphDefs in the SavedModel and modifies nodes with the assets filenames to point to the assets in `path`. After this transformation, the SavedModel GraphDefs can be used without feeding asset tensors. Args: saved_model_proto: SavedModel proto to be modified. path: path where the SavedModel is being loaded from. """
for meta_graph in saved_model_proto.meta_graphs: node_asset_map = {} if tf_v1.saved_model.constants.ASSETS_KEY in meta_graph.collection_def: assets_any_proto = meta_graph.collection_def[ tf_v1.saved_model.constants.ASSETS_KEY].any_list.value for asset_any_proto in assets_any_proto: asset_proto = meta_graph_pb2.AssetFileDef() asset_any_proto.Unpack(asset_proto) asset_filename = _get_asset_filename(path, asset_proto.filename) node_asset_map[_get_node_name_from_tensor( asset_proto.tensor_info.name)] = asset_filename del meta_graph.collection_def[tf_v1.saved_model.constants.ASSETS_KEY] for node in meta_graph.graph_def.node: asset_filepath = node_asset_map.get(node.name) if asset_filepath: _check_asset_node_def(node) node.attr["value"].tensor.string_val[0] = asset_filepath
<SYSTEM_TASK:> Creates an ASSETS_KEY collection in the GraphDefs in saved_model_proto. <END_TASK> <USER_TASK:> Description: def _make_assets_key_collection(saved_model_proto, export_path): """Creates an ASSETS_KEY collection in the GraphDefs in saved_model_proto. Adds an ASSETS_KEY collection to the GraphDefs in the SavedModel and returns a map from original asset filename to filename when exporting the SavedModel to `export_path`. This is roughly the inverse operation of `_merge_assets_key_collection`. Args: saved_model_proto: SavedModel proto to be modified. export_path: string with path where the saved_model_proto will be exported. Returns: A map from original asset filename to asset filename when exporting the SavedModel to path. Raises: ValueError: on unsupported/unexpected SavedModel. """
asset_filenames = {} used_asset_filenames = set() def _make_asset_filename(original_filename): """Returns the asset filename to use for the file.""" if original_filename in asset_filenames: return asset_filenames[original_filename] basename = os.path.basename(original_filename) suggestion = basename index = 0 while suggestion in used_asset_filenames: suggestion = "%s%d" % (basename, index) index += 1 asset_filenames[original_filename] = suggestion used_asset_filenames.add(suggestion) return suggestion for meta_graph in saved_model_proto.meta_graphs: collection_def = meta_graph.collection_def.get( tf_v1.GraphKeys.ASSET_FILEPATHS) if collection_def is None: continue if collection_def.WhichOneof("kind") != "node_list": raise ValueError( "MetaGraph collection ASSET_FILEPATHS is not a list of tensors.") for tensor in collection_def.node_list.value: if not tensor.endswith(":0"): raise ValueError("Unexpected tensor in ASSET_FILEPATHS collection.") asset_nodes = set([ _get_node_name_from_tensor(tensor) for tensor in collection_def.node_list.value ]) tensor_filename_map = {} for node in meta_graph.graph_def.node: if node.name in asset_nodes: _check_asset_node_def(node) filename = node.attr["value"].tensor.string_val[0] tensor_filename_map[node.name + ":0"] = filename # Clear value to avoid leaking the original path. node.attr["value"].tensor.string_val[0] = ( tf.compat.as_bytes("SAVEDMODEL-ASSET")) if tensor_filename_map: assets_key_collection = meta_graph.collection_def[ tf_v1.saved_model.constants.ASSETS_KEY] for tensor, filename in sorted(tensor_filename_map.items()): asset_proto = meta_graph_pb2.AssetFileDef() asset_proto.filename = _make_asset_filename(filename) asset_proto.tensor_info.name = tensor assets_key_collection.any_list.value.add().Pack(asset_proto) return { original_filename: _get_asset_filename(export_path, asset_filename) for original_filename, asset_filename in asset_filenames.items() }
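The collision handling inside `_make_asset_filename` can be exercised in isolation. A minimal sketch of the same naming scheme (the helper name is ours, not from the source):

```python
import os

def dedup_asset_names(filenames):
    # Same scheme as above: keep the basename when free, otherwise append
    # an increasing integer suffix until the name is unused.
    used, result = set(), {}
    for original in filenames:
        basename = os.path.basename(original)
        suggestion = basename
        index = 0
        while suggestion in used:
            suggestion = "%s%d" % (basename, index)
            index += 1
        used.add(suggestion)
        result[original] = suggestion
    return result

print(dedup_asset_names(["/a/vocab.txt", "/b/vocab.txt"]))
# {'/a/vocab.txt': 'vocab.txt', '/b/vocab.txt': 'vocab.txt0'}
```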
<SYSTEM_TASK:> Adds a copy of Graph with the specified set of tags. <END_TASK> <USER_TASK:> Description: def add_graph_copy(self, graph, tags=None): """Adds a copy of Graph with the specified set of tags."""
with graph.as_default(): # Remove default attrs so that Modules created by a tensorflow version # with ops that have new attrs that are left to their default values can # still be loaded by older versions unaware of those attributes. meta_graph = tf_v1.train.export_meta_graph(strip_default_attrs=True) _export_tags(meta_graph, tags) _export_signatures(meta_graph) _export_module_attachments(meta_graph) self._proto.meta_graphs.extend([meta_graph])
<SYSTEM_TASK:> Returns a copy of a MetaGraph with the identical set of tags. <END_TASK> <USER_TASK:> Description: def get_meta_graph_copy(self, tags=None): """Returns a copy of a MetaGraph with the identical set of tags."""
meta_graph = self.get_meta_graph(tags) copy = tf_v1.MetaGraphDef() copy.CopyFrom(meta_graph) return copy
<SYSTEM_TASK:> Exports to SavedModel directory. <END_TASK> <USER_TASK:> Description: def export(self, path, variables_saver=None): """Exports to SavedModel directory. Args: path: path where to export the SavedModel to. variables_saver: lambda that receives a directory path where to export checkpoints of variables. """
# Operate on a copy of self._proto since it needs to be modified. proto = saved_model_pb2.SavedModel() proto.CopyFrom(self._proto) assets_map = _make_assets_key_collection(proto, path) self._save_all_assets(path, assets_map) self._save_variables(path, variables_saver) self._save_proto(path, proto)
<SYSTEM_TASK:> Returns the matching MetaGraphDef or raises KeyError. <END_TASK> <USER_TASK:> Description: def get_meta_graph(self, tags=None): """Returns the matching MetaGraphDef or raises KeyError."""
matches = [meta_graph for meta_graph in self.meta_graphs if set(meta_graph.meta_info_def.tags) == set(tags or [])] if not matches: raise KeyError("SavedModelHandler has no graph with tags: %r" % tags) if len(matches) != 1: raise KeyError( "SavedModelHandler has multiple graphs with tags %r" % tags) return matches[0]
<SYSTEM_TASK:> Converts from inputs into dict of input tensors. <END_TASK> <USER_TASK:> Description: def _convert_dict_inputs(inputs, tensor_info_map): """Converts from inputs into dict of input tensors. This handles: - putting inputs into a dict, per _prepare_dict_inputs(), - converting all input values into tensors compatible with the expected input tensor (dtype, shape), - checking sparse/non-sparse tensor types. Args: inputs: inputs fed to Module.__call__(). tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo` describing the signature inputs. Returns: A dict of tensors to feed to the signature instantiation. Raises: TypeError: If it fails to convert the input values into a dict of tensors to feed to the signature instantiation. """
dict_inputs = _prepare_dict_inputs(inputs, tensor_info_map) return tensor_info.convert_dict_to_compatible_tensor(dict_inputs, tensor_info_map)
<SYSTEM_TASK:> Context manager that yields a function to directly evaluate a Module. <END_TASK> <USER_TASK:> Description: def eval_function_for_module(spec, tags=None): """Context manager that yields a function to directly evaluate a Module. This creates a separate graph, in which all of the signatures of the module are instantiated. Then, it creates a session and initializes the module variables. Finally, it returns a function which can be used to evaluate the module signatures. The function returned by eval_function_for_module has the same syntax as Module.__call__, except that inputs and outputs are not tensors but actual values as used with Session.run(). ```python with hub.eval_function_for_module("/tmp/text-embedding") as f: # The module can be directly evaluated using f without constructing a graph. embeddings = f(["Hello world!",], signature="mysignature") ``` Args: spec: A ModuleSpec defining the Module to instantiate or a path where to load a ModuleSpec from via `load_module_spec`. tags: A set of strings specifying the graph variant to use. Yields: A function whose keyword arguments are fed into the tfhub module and which returns a dictionary with the value of the output tensors. Raises: RuntimeError: explaining the reason why it failed to instantiate the Module. ValueError: if the requested graph variant does not exist. """
# We create a separate graph and add all the signatures of the module to it. original_graph = tf_v1.get_default_graph() with tf.Graph().as_default(): module = Module(spec, tags=tags) input_tensors_per_signature = {} output_tensors_per_signature = {} for signature in module.get_signature_names(): # We scope with the signature name as different signatures will likely # contain tensors with the same name (e.g. the input and output tensors). with tf_v1.variable_scope(signature): input_tensors = {} for name, tensorinfo in module.get_input_info_dict(signature).items(): # We need to be careful with the shape as it may be fully-known, # partially-known or even unknown. shape = tensorinfo.get_shape() effective_shape = None if shape.dims is None else shape.as_list() if tensorinfo.is_sparse: input_tensors[name] = tf_v1.sparse_placeholder( tensorinfo.dtype, shape=effective_shape, name=name) else: input_tensors[name] = tf_v1.placeholder( tensorinfo.dtype, shape=effective_shape, name=name) input_tensors_per_signature[signature] = input_tensors output_tensors_per_signature[signature] = module( input_tensors_per_signature[signature], signature=signature, as_dict=True) # Evaluating the tfhub module requires an active tensorflow session. with tf_v1.train.SingularMonitoredSession() as sess: def func( inputs=None, _sentinel=None, # pylint: disable=invalid-name signature=None, as_dict=None): """Function that directly evaluates a signature in the module.""" signature = signature or "default" input_tensors = input_tensors_per_signature[signature] dict_inputs = _prepare_dict_inputs(inputs, input_tensors) # The input arguments are directly fed into the session. feed_dict = { input_tensors[key]: value for key, value in dict_inputs.items() } output = output_tensors_per_signature[signature] output = _prepare_outputs(output, as_dict) return sess.run(output, feed_dict=feed_dict) with original_graph.as_default(): # Yield the function since that will keep the session alive until the # user exits the context. yield func
<SYSTEM_TASK:> Describes the inputs required by a signature. <END_TASK> <USER_TASK:> Description: def get_input_info_dict(self, signature=None): """Describes the inputs required by a signature. Args: signature: A string with the signature to get inputs information for. If None, the default signature is used if defined. Returns: The result of ModuleSpec.get_input_info_dict() for the given signature, and the graph variant selected by `tags` when this Module was initialized. Raises: KeyError: if there is no such signature. """
return self._spec.get_input_info_dict(signature=signature, tags=self._tags)
<SYSTEM_TASK:> Describes the outputs provided by a signature. <END_TASK> <USER_TASK:> Description: def get_output_info_dict(self, signature=None): """Describes the outputs provided by a signature. Args: signature: A string with the signature to get outputs information for. If None, the default signature is used if defined. Returns: The result of ModuleSpec.get_output_info_dict() for the given signature, and the graph variant selected by `tags` when this Module was initialized. Raises: KeyError: if there is no such signature. """
return self._spec.get_output_info_dict(signature=signature, tags=self._tags)
<SYSTEM_TASK:> Exports the module with the variables from the session in `path`. <END_TASK> <USER_TASK:> Description: def export(self, path, session): """Exports the module with the variables from the session in `path`. Note that it is the module definition in the ModuleSpec used to create this module that gets exported. The session is only used to provide the value of variables. Args: path: path where to export the module to. session: session where to export the variables from. Raises: RuntimeError: if there is an issue during the export. """
if self._graph is not tf_v1.get_default_graph(): raise RuntimeError("default graph differs from the graph where the " "module was instantiated.") if self._graph is not session.graph: raise RuntimeError("session graph differs from the graph where the " "module was instantiated.") self._impl.export(path, session)
<SYSTEM_TASK:> Returns the list of all tf.Variables created by module instantiation. <END_TASK> <USER_TASK:> Description: def variables(self): """Returns the list of all tf.Variables created by module instantiation."""
result = [] for _, value in sorted(self.variable_map.items()): if isinstance(value, list): result.extend(value) else: result.append(value) return result
<SYSTEM_TASK:> Uses a Module to construct a dense representation from a text feature. <END_TASK> <USER_TASK:> Description: def text_embedding_column(key, module_spec, trainable=False): """Uses a Module to construct a dense representation from a text feature. This feature column can be used on an input feature whose values are strings of arbitrary size. The result of this feature column is the result of passing its `input` through the module `m` instantiated from `module_spec`, as per `result = m(input)`. The `result` must have dtype float32 and shape `[batch_size, num_features]` with a known value of num_features. Example: ```python comment = text_embedding_column("comment", "/tmp/text-module") feature_columns = [comment, ...] ... features = { "comment": np.array(["wow, much amazing", "so easy", ...]), ... } labels = np.array([[1], [0], ...]) # If running TF 2.x, use `tf.compat.v1.estimator.inputs.numpy_input_fn` input_fn = tf.estimator.inputs.numpy_input_fn(features, labels, shuffle=True) estimator = tf.estimator.DNNClassifier(hidden_units, feature_columns) estimator.train(input_fn, max_steps=100) ``` Args: key: A string or `_FeatureColumn` identifying the text feature. module_spec: A ModuleSpec defining the Module to instantiate or a path where to load a ModuleSpec via `load_module_spec` trainable: Whether or not the Module is trainable. False by default, meaning the pre-trained weights are frozen. This is different from the ordinary tf.feature_column.embedding_column(), but that one is intended for training from scratch. Returns: `_DenseColumn` that converts from text input. Raises: ValueError: if module_spec is not suitable for use in this feature column. """
module_spec = module.as_module_spec(module_spec) _check_module_is_text_embedding(module_spec) return _TextEmbeddingColumn(key=key, module_spec=module_spec, trainable=trainable)
<SYSTEM_TASK:> Raises ValueError if `module_spec` is not a text-embedding module. <END_TASK> <USER_TASK:> Description: def _check_module_is_text_embedding(module_spec): """Raises ValueError if `module_spec` is not a text-embedding module. Args: module_spec: A `ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with Tensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)). """
issues = [] # Find issues with signature inputs. input_info_dict = module_spec.get_input_info_dict() if len(input_info_dict) != 1: issues.append("Module default signature must require only one input") else: input_info, = input_info_dict.values() input_shape = input_info.get_shape() if not (input_info.dtype == tf.string and input_shape.ndims == 1 and input_shape.as_list() == [None]): issues.append( "Module default signature must have only one input " "tf.Tensor(shape=(?,), dtype=string)" ) # Find issues with signature outputs. output_info_dict = module_spec.get_output_info_dict() if "default" not in output_info_dict: issues.append("Module default signature must have a 'default' output.") else: output_info = output_info_dict["default"] output_shape = output_info.get_shape() if not (output_info.dtype == tf.float32 and output_shape.ndims == 2 and not output_shape.as_list()[0] and output_shape.as_list()[1]): issues.append( "Module default signature must have a 'default' output of " "tf.Tensor(shape=(?,K), dtype=float32)." ) if issues: raise ValueError("Module is not a text-embedding: %r" % issues)
<SYSTEM_TASK:> Uses a Module to get a dense 1-D representation from the pixels of images. <END_TASK> <USER_TASK:> Description: def image_embedding_column(key, module_spec): """Uses a Module to get a dense 1-D representation from the pixels of images. This feature column can be used on images, represented as float32 tensors of RGB pixel data in the range [0,1]. This can be read from a numeric_column() if the tf.Example input data happens to have decoded images, all with the same shape [height, width, 3]. More commonly, the input_fn will have code to explicitly decode images, resize them (possibly after performing data augmentation such as random crops etc.), and provide a batch of shape [batch_size, height, width, 3]. The result of this feature column is the result of passing its `input` through the module `m` instantiated from `module_spec`, as per `result = m({"images": input})`. The `result` must have dtype float32 and shape `[batch_size, num_features]` with a known value of num_features. Example: ```python image_column = hub.image_embedding_column("embeddings", "/tmp/image-module") feature_columns = [image_column, ...] estimator = tf.estimator.LinearClassifier(feature_columns, ...) height, width = hub.get_expected_image_size(image_column.module_spec) input_fn = ... # Provides "embeddings" with shape [None, height, width, 3]. estimator.train(input_fn, ...) ``` Args: key: A string or `_FeatureColumn` identifying the input image data. module_spec: A string handle or a `ModuleSpec` identifying the module. Returns: `_DenseColumn` that converts from pixel data. Raises: ValueError: if module_spec is not suitable for use in this feature column. """
module_spec = module.as_module_spec(module_spec) _check_module_is_image_embedding(module_spec) return _ImageEmbeddingColumn(key=key, module_spec=module_spec)
<SYSTEM_TASK:> Raises ValueError if `module_spec` is not usable as image embedding. <END_TASK> <USER_TASK:> Description: def _check_module_is_image_embedding(module_spec): """Raises ValueError if `module_spec` is not usable as image embedding. Args: module_spec: A `_ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with mapping an "images" input to a Tensor(float32, shape=(_,K)). """
issues = [] # Find issues with "default" signature inputs. The common signatures for # image models prescribe a specific name; we trust it if we find it # and if we can do the necessary inference of input shapes from it. input_info_dict = module_spec.get_input_info_dict() if (list(input_info_dict.keys()) != ["images"] or input_info_dict["images"].dtype != tf.float32): issues.append("Module 'default' signature must require a single input, " "which must have type float32 and name 'images'.") else: try: image_util.get_expected_image_size(module_spec) except ValueError as e: issues.append("Module does not support hub.get_expected_image_size(); " "original error was:\n" + str(e)) # Raised again below. # Find issues with "default" signature outputs. We test that the dtype and # shape is appropriate for use in input_layer(). output_info_dict = module_spec.get_output_info_dict() if "default" not in output_info_dict: issues.append("Module 'default' signature must have a 'default' output.") else: output_type = output_info_dict["default"].dtype output_shape = output_info_dict["default"].get_shape() if not (output_type == tf.float32 and output_shape.ndims == 2 and output_shape.dims[1].value): issues.append("Module 'default' signature must have a 'default' output " "of tf.Tensor(shape=(_,K), dtype=float32).") if issues: raise ValueError("Module is not usable as image embedding: %r" % issues)
<SYSTEM_TASK:> Returns string. Used for variable_scope and naming. <END_TASK> <USER_TASK:> Description: def name(self): """Returns string. Used for variable_scope and naming."""
if not hasattr(self, "_name"): self._name = "{}_hub_module_embedding".format(self.key) return self._name
<SYSTEM_TASK:> Returns a `tf.Example` parsing spec as dict. <END_TASK> <USER_TASK:> Description: def _parse_example_spec(self): """Returns a `tf.Example` parsing spec as dict."""
height, width = image_util.get_expected_image_size(self.module_spec) input_shape = [height, width, 3] return {self.key: tf_v1.FixedLenFeature(input_shape, tf.float32)}
<SYSTEM_TASK:> Returns cache directory. <END_TASK> <USER_TASK:> Description: def tfhub_cache_dir(default_cache_dir=None, use_temp=False): """Returns cache directory. Returns cache directory from either TFHUB_CACHE_DIR environment variable or --tfhub_cache_dir or default, if set. Args: default_cache_dir: Default cache location to use if neither the TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir is specified. use_temp: bool, optionally use the system's temp directory as the module cache directory if neither default_cache_dir nor --tfhub_cache_dir nor the TFHUB_CACHE_DIR environment variable is specified. """
# Note: We are using FLAGS["tfhub_cache_dir"] (and not FLAGS.tfhub_cache_dir) # to access the flag value in order to avoid parsing argv list. The flags # should have been parsed by now in main() by tf.app.run(). If that was not # the case (say in Colab env) we skip flag parsing because argv may contain # unknown flags. cache_dir = ( os.getenv(_TFHUB_CACHE_DIR, "") or FLAGS["tfhub_cache_dir"].value or default_cache_dir) if not cache_dir and use_temp: # Place all TF-Hub modules under <system's temp>/tfhub_modules. cache_dir = os.path.join(tempfile.gettempdir(), "tfhub_modules") if cache_dir: logging.log_first_n(logging.INFO, "Using %s to cache modules.", 1, cache_dir) return cache_dir
<SYSTEM_TASK:> Creates and returns the name of directory where to cache a module. <END_TASK> <USER_TASK:> Description: def create_local_module_dir(cache_dir, module_name): """Creates and returns the name of directory where to cache a module."""
tf_v1.gfile.MakeDirs(cache_dir) return os.path.join(cache_dir, module_name)
<SYSTEM_TASK:> Writes a descriptor file about the directory containing a module. <END_TASK> <USER_TASK:> Description: def _write_module_descriptor_file(handle, module_dir): """Writes a descriptor file about the directory containing a module. Args: handle: Module name/handle. module_dir: Directory where a module was downloaded. """
readme = _module_descriptor_file(module_dir) readme_content = ( "Module: %s\nDownload Time: %s\nDownloader Hostname: %s (PID:%d)" % (handle, str(datetime.datetime.today()), socket.gethostname(), os.getpid())) # The descriptor file has no semantic meaning, so we allow 'overwrite': # another process might have written the file (and crashed), and we simply # overwrite it. tf_utils.atomic_write_string_to_file(readme, readme_content, overwrite=True)
<SYSTEM_TASK:> Returns the size of the temp dir pointed to by the given lock file. <END_TASK> <USER_TASK:> Description: def _locked_tmp_dir_size(lock_filename): """Returns the size of the temp dir pointed to by the given lock file."""
task_uid = _task_uid_from_lock_file(lock_filename) try: return _dir_size( _temp_download_dir(_module_dir(lock_filename), task_uid)) except tf.errors.NotFoundError: return 0
<SYSTEM_TASK:> Waits for the lock file to disappear. <END_TASK> <USER_TASK:> Description: def _wait_for_lock_to_disappear(handle, lock_file, lock_file_timeout_sec): """Waits for the lock file to disappear. The lock file was created by another process that is performing a download into its own temporary directory. The name of this temp directory is sha1(<module>).<uuid>.tmp where <uuid> comes from the lock file. Args: handle: The location from where a module is being download. lock_file: Lock file created by another process downloading this module. lock_file_timeout_sec: The amount of time to wait (in seconds) before we can declare that the other download has been abandoned. The download is declared abandoned if there is no file size change in the temporary directory within the last 'lock_file_timeout_sec'. """
locked_tmp_dir_size = 0 locked_tmp_dir_size_check_time = time.time() lock_file_content = None while tf_v1.gfile.Exists(lock_file): try: logging.log_every_n( logging.INFO, "Module '%s' already being downloaded by '%s'. Waiting.", 10, handle, tf_utils.read_file_to_string(lock_file)) if (time.time() - locked_tmp_dir_size_check_time > lock_file_timeout_sec): # Check whether the holder of the current lock downloaded anything # in its temporary directory in the last 'lock_file_timeout_sec'. cur_locked_tmp_dir_size = _locked_tmp_dir_size(lock_file) cur_lock_file_content = tf_utils.read_file_to_string(lock_file) if (cur_locked_tmp_dir_size == locked_tmp_dir_size and cur_lock_file_content == lock_file_content): # There was no data downloaded in the past # 'lock_file_timeout_sec'. Steal the lock and proceed with the # local download. logging.warning("Deleting lock file %s due to inactivity.", lock_file) tf_v1.gfile.Remove(lock_file) break locked_tmp_dir_size = cur_locked_tmp_dir_size locked_tmp_dir_size_check_time = time.time() lock_file_content = cur_lock_file_content except tf.errors.NotFoundError: # Lock file or temp directory were deleted during check. Continue # to check whether download succeeded or we need to start our own # download. pass finally: time.sleep(5)
<SYSTEM_TASK:> Returns the path to a Module directory for a given TF-Hub Module handle. <END_TASK> <USER_TASK:> Description: def atomic_download(handle, download_fn, module_dir, lock_file_timeout_sec=10 * 60): """Returns the path to a Module directory for a given TF-Hub Module handle. Args: handle: (string) Location of a TF-Hub Module. download_fn: Callback function that actually performs download. The callback receives two arguments, handle and the location of a temporary directory to download the content into. module_dir: Directory where to download the module files to. lock_file_timeout_sec: The amount of time we give the current holder of the lock to make progress in downloading a module. If no progress is made, the lock is revoked. Returns: A string containing the path to a TF-Hub Module directory. Raises: ValueError: if the Module is not found. """
lock_file = _lock_filename(module_dir) task_uid = uuid.uuid4().hex lock_contents = _lock_file_contents(task_uid) tmp_dir = _temp_download_dir(module_dir, task_uid) # Attempt to protect against cases of processes being cancelled with # KeyboardInterrupt by using a try/finally clause to remove the lock # and tmp_dir. try: while True: try: tf_utils.atomic_write_string_to_file(lock_file, lock_contents, overwrite=False) # Must test condition again, since another process could have created # the module and deleted the old lock file since last test. if tf_v1.gfile.Exists(module_dir): # Lock file will be deleted in the finally-clause. return module_dir break # Proceed to downloading the module. except tf.errors.OpError: pass # Wait for lock file to disappear. _wait_for_lock_to_disappear(handle, lock_file, lock_file_timeout_sec) # At this point we either deleted a lock or a lock got removed by the # owner or another process. Perform one more iteration of the while-loop; # we will either terminate due to tf_v1.gfile.Exists(module_dir), obtain # a lock ourselves, or wait again for the lock to disappear. # Lock file acquired. logging.info("Downloading TF-Hub Module '%s'.", handle) tf_v1.gfile.MakeDirs(tmp_dir) download_fn(handle, tmp_dir) # Write module descriptor to capture information about which module was # downloaded by whom and when. The file is stored at the same level as the # module directory in order to keep the content of the 'module_dir' exactly # as it was defined by the module publisher. # # Note: The descriptor is written purely to help the end-user to identify # which directory belongs to which module. The descriptor is not part of the # module caching protocol and no code in the TF-Hub library reads its # content. _write_module_descriptor_file(handle, module_dir) try: tf_v1.gfile.Rename(tmp_dir, module_dir) logging.info("Downloaded TF-Hub Module '%s'.", handle) except tf.errors.AlreadyExistsError: logging.warning("Module already exists in %s", module_dir) finally: try: # Temp directory is owned by the current process, remove it. tf_v1.gfile.DeleteRecursively(tmp_dir) except tf.errors.NotFoundError: pass try: contents = tf_utils.read_file_to_string(lock_file) except tf.errors.NotFoundError: contents = "" if contents == lock_contents: # Lock file exists and is owned by this process. try: tf_v1.gfile.Remove(lock_file) except tf.errors.NotFoundError: pass return module_dir
<SYSTEM_TASK:> Prints a message about download progress either to the console or TF log. <END_TASK> <USER_TASK:> Description: def _print_download_progress_msg(self, msg, flush=False): """Prints a message about download progress either to the console or TF log. Args: msg: Message to print. flush: Indicates whether to flush the output (only used in interactive mode). """
if self._interactive_mode(): # Print progress message to console overwriting previous progress # message. self._max_prog_str = max(self._max_prog_str, len(msg)) sys.stdout.write("\r%-{}s".format(self._max_prog_str) % msg) sys.stdout.flush() if flush: print("\n") else: # Interactive progress tracking is disabled. Print progress to the # standard TF log. logging.info(msg)
<SYSTEM_TASK:> Logs progress information about ongoing module download. <END_TASK> <USER_TASK:> Description: def _log_progress(self, bytes_downloaded): """Logs progress information about ongoing module download. Args: bytes_downloaded: Number of bytes downloaded. """
self._total_bytes_downloaded += bytes_downloaded now = time.time() if (self._interactive_mode() or now - self._last_progress_msg_print_time > 15): # Print progress message every 15 secs or if interactive progress # tracking is enabled. self._print_download_progress_msg( "Downloading %s: %s" % (self._url, tf_utils.bytes_to_readable_str( self._total_bytes_downloaded, True))) self._last_progress_msg_print_time = now
<SYSTEM_TASK:> Extracts 'tarinfo' from 'tgz' and writes to 'dst_path'. <END_TASK> <USER_TASK:> Description: def _extract_file(self, tgz, tarinfo, dst_path, buffer_size=10<<20): """Extracts 'tarinfo' from 'tgz' and writes to 'dst_path'."""
src = tgz.extractfile(tarinfo) dst = tf_v1.gfile.GFile(dst_path, "wb") while True: buf = src.read(buffer_size) if not buf: break dst.write(buf) self._log_progress(len(buf)) dst.close() src.close()
<SYSTEM_TASK:> Streams the content for the 'fileobj' and stores the result in dst_path. <END_TASK> <USER_TASK:> Description: def download_and_uncompress(self, fileobj, dst_path): """Streams the content for the 'fileobj' and stores the result in dst_path. Args: fileobj: File handle pointing to .tar/.tar.gz content. dst_path: Absolute path where to store uncompressed data from 'fileobj'. Raises: ValueError: Unknown object encountered inside the TAR file. """
try: with tarfile.open(mode="r|*", fileobj=fileobj) as tgz: for tarinfo in tgz: abs_target_path = _merge_relative_path(dst_path, tarinfo.name) if tarinfo.isfile(): self._extract_file(tgz, tarinfo, abs_target_path) elif tarinfo.isdir(): tf_v1.gfile.MakeDirs(abs_target_path) else: # We do not support symlinks and other uncommon objects. raise ValueError( "Unexpected object type in tar archive: %s" % tarinfo.type) total_size_str = tf_utils.bytes_to_readable_str( self._total_bytes_downloaded, True) self._print_download_progress_msg( "Downloaded %s, Total size: %s" % (self._url, total_size_str), flush=True) except tarfile.ReadError: raise IOError("%s does not appear to be a valid module." % self._url)
<SYSTEM_TASK:> Prepends name scope to a name. <END_TASK> <USER_TASK:> Description: def prepend_name_scope(name, import_scope): """Prepends name scope to a name."""
# Based on tensorflow/python/framework/ops.py implementation. if import_scope: try: str_to_replace = r"([\^]|loc:@|^)(.*)" return re.sub(str_to_replace, r"\1" + import_scope + r"/\2", tf.compat.as_str_any(name)) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name
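A standalone sketch of the same substitution, showing how the optional "^" (control input) and "loc:@" (colocation) prefixes are preserved (expected outputs in comments):

```python
import re

def prepend_scope(name, scope):
    # Mirrors the substitution above: inserts "scope/" after an optional
    # "^" (control-input marker) or "loc:@" (colocation) prefix.
    return re.sub(r"([\^]|loc:@|^)(.*)", r"\1" + scope + r"/\2", name)

print(prepend_scope("weights", "module"))       # module/weights
print(prepend_scope("^init_op", "module"))      # ^module/init_op
print(prepend_scope("loc:@weights", "module"))  # loc:@module/weights
```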
<SYSTEM_TASK:> In-place prefixes shared_name attributes of nodes. <END_TASK> <USER_TASK:> Description: def prefix_shared_name_attributes(meta_graph, absolute_import_scope): """In-place prefixes shared_name attributes of nodes."""
shared_name_attr = "shared_name" for node in meta_graph.graph_def.node: shared_name_value = node.attr.get(shared_name_attr, None) if shared_name_value and shared_name_value.HasField("s"): if shared_name_value.s: node.attr[shared_name_attr].s = tf.compat.as_bytes( prepend_name_scope( shared_name_value.s, import_scope=absolute_import_scope))
<SYSTEM_TASK:> Function to propagate backwards in the graph and mark nodes as used. <END_TASK> <USER_TASK:> Description: def mark_backward(output_tensor, used_node_names): """Function to propagate backwards in the graph and mark nodes as used. Traverses recursively through the graph from the end tensor, through the op that generates the tensor, and then to the input tensors that feed the op. Nodes encountered are stored in used_node_names. Args: output_tensor: A Tensor which we start the propagation. used_node_names: A list of strings, stores the name of nodes we've marked as visited. """
op = output_tensor.op if op.name in used_node_names: return used_node_names.add(op.name) for input_tensor in op.inputs: mark_backward(input_tensor, used_node_names) for control_input_op in op.control_inputs: used_node_names.add(control_input_op.name) for input_tensor in control_input_op.inputs: mark_backward(input_tensor, used_node_names)
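A small demonstration, assuming `mark_backward` above is importable and TF graph-mode ops are available:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0, name="a")
    b = tf.constant(2.0, name="b")  # Not an input of c; stays unmarked.
    c = tf.add(a, a, name="c")

used_node_names = set()
mark_backward(c, used_node_names)
print(sorted(used_node_names))  # ['a', 'c']
```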
<SYSTEM_TASK:> Function to prune unused ops given a signature def. <END_TASK> <USER_TASK:> Description: def prune_unused_nodes(meta_graph, signature_def): """Function to prune unused ops given a signature def. This function does a graph traversal from all outputs as defined in the signature_def to collect all used nodes. Then, any nodes which are unused can be discarded. This is useful for graphs which execute eagerly or on TPUs. Args: meta_graph: The input/output MetaGraphDef for which we wish to prune. signature_def: A SignatureDef which specifies the outputs from which we wish to start graph traversal. """
# Instantiate a temporary empty graph so that we have access to Graph API # and import the meta_graph. graph = tf_v1.Graph() with graph.as_default(): tf_v1.train.import_meta_graph(meta_graph, input_map={}, import_scope="") # Traverse from all outputs and mark all nodes. used_node_names = set() for _, tensor_def in signature_def.outputs.items(): output_tensor = graph.get_tensor_by_name(tensor_def.name) mark_backward(output_tensor, used_node_names) # Filter out all nodes in the meta_graph that are not used. node_filter_in_list = [] for node in meta_graph.graph_def.node: # Make a special exception for VarHandleOp. Removing VarHandleOps # will make the graph not importable as they often leave nodes hanging. # These will be disconnected through the feedmap when importing the # metagraph. if node.name in used_node_names or node.op == "VarHandleOp": node_filter_in_list.append(node) del meta_graph.graph_def.node[:] meta_graph.graph_def.node.extend(node_filter_in_list) del graph
<SYSTEM_TASK:> Function to prune the feedmap of nodes which no longer exist. <END_TASK> <USER_TASK:> Description: def prune_feed_map(meta_graph, feed_map): """Function to prune the feedmap of nodes which no longer exist."""
node_names = [x.name + ":0" for x in meta_graph.graph_def.node] keys_to_delete = [] for k, _ in feed_map.items(): if k not in node_names: keys_to_delete.append(k) for k in keys_to_delete: del feed_map[k]
<SYSTEM_TASK:> Writes to `filename` atomically. <END_TASK> <USER_TASK:> Description: def atomic_write_string_to_file(filename, contents, overwrite): """Writes to `filename` atomically. This means that when `filename` appears in the filesystem, it will contain all of `contents`. With write_string_to_file, it is possible for the file to appear in the filesystem with `contents` only partially written. Accomplished by writing to a temp file and then renaming it. Args: filename: string, pathname for a file contents: string, contents that need to be written to the file overwrite: boolean, if false it's an error for `filename` to be occupied by an existing file. """
temp_pathname = (tf.compat.as_bytes(filename) + tf.compat.as_bytes(".tmp") + tf.compat.as_bytes(uuid.uuid4().hex)) with tf_v1.gfile.GFile(temp_pathname, mode="w") as f: f.write(contents) try: tf_v1.gfile.Rename(temp_pathname, filename, overwrite) except tf.errors.OpError: tf_v1.gfile.Remove(temp_pathname) raise
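The same write-to-temp-then-rename idea can be expressed with only the standard library; a hedged sketch (not the tf_utils implementation above):

```python
import os
import tempfile

def atomic_write(filename, contents):
    # Write to a unique temp file in the same directory, then rename it
    # into place; os.replace is atomic on POSIX filesystems.
    directory = os.path.dirname(os.path.abspath(filename))
    fd, temp_pathname = tempfile.mkstemp(dir=directory)
    try:
        with os.fdopen(fd, "w") as f:
            f.write(contents)
        os.replace(temp_pathname, filename)
    except Exception:
        os.remove(temp_pathname)
        raise

atomic_write("config.txt", "key=value\n")
```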
<SYSTEM_TASK:> Builds a path to a new subdirectory within the base directory. <END_TASK> <USER_TASK:> Description: def get_timestamped_export_dir(export_dir_base): """Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name. """
attempts = 0 while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS: export_timestamp = int(time.time()) export_dir = os.path.join( tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(str(export_timestamp))) if not tf_v1.gfile.Exists(export_dir): # Collisions are still possible (though extremely unlikely): this # directory is not actually created yet, but it will be almost # instantly on return from this function. return export_dir time.sleep(1) attempts += 1 logging.warn( "Export directory %s already exists; retrying (attempt %d/%d)", export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS) raise RuntimeError("Failed to obtain a unique export directory name after " "%d attempts." % MAX_DIRECTORY_CREATION_ATTEMPTS)
<SYSTEM_TASK:> Builds a directory name based on the argument but starting with 'temp-'. <END_TASK> <USER_TASK:> Description: def get_temp_export_dir(timestamped_export_dir): """Builds a directory name based on the argument but starting with 'temp-'. This relies on the fact that TensorFlow Serving ignores subdirectories of the base directory that can't be parsed as integers. Args: timestamped_export_dir: the name of the eventual export directory, e.g. /foo/bar/<timestamp> Returns: A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>. """
(dirname, basename) = os.path.split(timestamped_export_dir) temp_export_dir = os.path.join( tf.compat.as_bytes(dirname), tf.compat.as_bytes("temp-{}".format(basename))) return temp_export_dir
<SYSTEM_TASK:> Deletes older exports, retaining only a given number of the most recent. <END_TASK> <USER_TASK:> Description: def garbage_collect_exports(export_dir_base, exports_to_keep): """Deletes older exports, retaining only a given number of the most recent. Export subdirectories are assumed to be named with monotonically increasing integers; the most recent are taken to be those with the largest values. Args: export_dir_base: the base directory under which each export is in a versioned subdirectory. exports_to_keep: Number of exports to keep. Older exports will be garbage collected. Set to None to disable. """
if exports_to_keep is None: return version_paths = [] # List of tuples (version, path) for filename in tf_v1.gfile.ListDirectory(export_dir_base): path = os.path.join( tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(filename)) if len(filename) == 10 and filename.isdigit(): version_paths.append((int(filename), path)) oldest_version_paths = sorted(version_paths)[:-exports_to_keep] for _, path in oldest_version_paths: try: tf_v1.gfile.DeleteRecursively(path) except tf.errors.NotFoundError as e: logging.warn("Can not delete %s recursively: %s", path, e)
<SYSTEM_TASK:> Generate a human-readable string representing number of bytes. <END_TASK> <USER_TASK:> Description: def bytes_to_readable_str(num_bytes, include_b=False): """Generate a human-readable string representing number of bytes. The units B, kB, MB and GB are used. Args: num_bytes: (`int` or None) Number of bytes. include_b: (`bool`) Include the letter B at the end of the unit. Returns: (`str`) A string representing the number of bytes in a human-readable way, including a unit at the end. """
if num_bytes is None: return str(num_bytes) if num_bytes < 1024: result = "%d" % num_bytes elif num_bytes < 1048576: result = "%.2fk" % (num_bytes / float(1 << 10)) elif num_bytes < 1073741824: result = "%.2fM" % (num_bytes / float(1 << 20)) else: result = "%.2fG" % (num_bytes / float(1 << 30)) if include_b: result += "B" return result
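A quick usage sketch, assuming the function above is in scope:

```python
for n in (None, 512, 2048, 5 << 20, 3 << 30):
    print(bytes_to_readable_str(n, include_b=True))
# None, 512B, 2.00kB, 5.00MB, 3.00GB
```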
<SYSTEM_TASK:> Generates a new release announcement entry in the docs. <END_TASK> <USER_TASK:> Description: def announce(version): """Generates a new release announcement entry in the docs."""
# Get our list of authors stdout = check_output(["git", "describe", "--abbrev=0", "--tags"]) stdout = stdout.decode("utf-8") last_version = stdout.strip() stdout = check_output( ["git", "log", "{}..HEAD".format(last_version), "--format=%aN"] ) stdout = stdout.decode("utf-8") contributors = set(stdout.splitlines()) template_name = ( "release.minor.rst" if version.endswith(".0") else "release.patch.rst" ) template_text = ( Path(__file__).parent.joinpath(template_name).read_text(encoding="UTF-8") ) contributors_text = ( "\n".join("* {}".format(name) for name in sorted(contributors)) + "\n" ) text = template_text.format(version=version, contributors=contributors_text) target = Path(__file__).parent.joinpath( "../doc/en/announce/release-{}.rst".format(version) ) target.write_text(text, encoding="UTF-8") print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}") # Update index with the new release entry index_path = Path(__file__).parent.joinpath("../doc/en/announce/index.rst") lines = index_path.read_text(encoding="UTF-8").splitlines() indent = " " for index, line in enumerate(lines): if line.startswith("{}release-".format(indent)): new_line = indent + target.stem if line != new_line: lines.insert(index, new_line) index_path.write_text("\n".join(lines) + "\n", encoding="UTF-8") print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Updated {index_path.name}" ) else: print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Skip {index_path.name} (already contains release)" ) break check_call(["git", "add", str(target)])
<SYSTEM_TASK:> Sets the ca_bundle of this V1alpha1WebhookClientConfig. <END_TASK> <USER_TASK:> Description: def ca_bundle(self, ca_bundle): """ Sets the ca_bundle of this V1alpha1WebhookClientConfig. `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. :param ca_bundle: The ca_bundle of this V1alpha1WebhookClientConfig. :type: str """
if ca_bundle is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle): raise ValueError("Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._ca_bundle = ca_bundle
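The validation pattern is a standard base64 regex; it can be checked in isolation:

```python
import re

BASE64_RE = re.compile(
    r'^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$')

print(bool(BASE64_RE.match("Zm9vYmFy")))    # True  (base64 of "foobar")
print(bool(BASE64_RE.match("Zm9vYg==")))    # True  (base64 of "foob")
print(bool(BASE64_RE.match("not base64")))  # False
```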
<SYSTEM_TASK:> Sets the raw of this RuntimeRawExtension. <END_TASK> <USER_TASK:> Description: def raw(self, raw): """ Sets the raw of this RuntimeRawExtension. Raw is the underlying serialization of this object. :param raw: The raw of this RuntimeRawExtension. :type: str """
if raw is None: raise ValueError("Invalid value for `raw`, must not be `None`") if raw is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', raw): raise ValueError("Invalid value for `raw`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._raw = raw
<SYSTEM_TASK:> Create thread pool on first request <END_TASK> <USER_TASK:> Description: def pool(self): """Create thread pool on first request avoids instantiating unused threadpool for blocking clients. """
if self._pool is None: self._pool = ThreadPool(self.pool_threads) return self._pool
<SYSTEM_TASK:> Sets the debug status. <END_TASK> <USER_TASK:> Description: def debug(self, value): """ Sets the debug status. :param value: The debug status, True or False. :type: bool """
self.__debug = value if self.__debug: # if debug status is True, turn on debug logging for _, logger in iteritems(self.logger): logger.setLevel(logging.DEBUG) # turn on httplib debug httplib.HTTPConnection.debuglevel = 1 else: # if debug status is False, turn off debug logging, # setting log level to default `logging.WARNING` for _, logger in iteritems(self.logger): logger.setLevel(logging.WARNING) # turn off httplib debug httplib.HTTPConnection.debuglevel = 0
<SYSTEM_TASK:> Sets the logger_format. <END_TASK> <USER_TASK:> Description: def logger_format(self, value): """ Sets the logger_format. The logger_formatter will be updated when sets logger_format. :param value: The format string. :type: str """
self.__logger_format = value self.logger_formatter = logging.Formatter(self.__logger_format)
<SYSTEM_TASK:> Sets the certificate of this V1beta1CertificateSigningRequestStatus. <END_TASK> <USER_TASK:> Description: def certificate(self, certificate): """ Sets the certificate of this V1beta1CertificateSigningRequestStatus. If request was approved, the controller will place the issued certificate here. :param certificate: The certificate of this V1beta1CertificateSigningRequestStatus. :type: str """
if certificate is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', certificate): raise ValueError("Invalid value for `certificate`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._certificate = certificate
<SYSTEM_TASK:> Compresses and pickles given object to the given filename. <END_TASK> <USER_TASK:> Description: def save_object(filename, obj): """Compresses and pickles given object to the given filename."""
logging.info('saving {}...'.format(filename)) try: with gzip.GzipFile(filename, 'wb') as f: f.write(pickle.dumps(obj, 1)) except Exception as e: logging.error('save failure: {}'.format(e)) raise
<SYSTEM_TASK:> Unpickles and decompresses the given filename and returns the created object. <END_TASK> <USER_TASK:> Description: def load_object(filename): """Unpickles and decompresses the given filename and returns the created object."""
logging.info('loading {}...'.format(filename)) try: with gzip.GzipFile(filename, 'rb') as f: buf = b'' while True: data = f.read() if not data: break buf += data return pickle.loads(buf) except Exception as e: logging.error('load failure: {}'.format(e)) raise
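Assuming both helpers above live in the same module (with their gzip/pickle/logging imports), a round trip looks like:

```python
data = {"weights": list(range(5)), "name": "model"}
save_object("/tmp/model.pkl.gz", data)
assert load_object("/tmp/model.pkl.gz") == data
```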
<SYSTEM_TASK:> Returns the section heading for the issue, or None if this issue should be ignored. <END_TASK> <USER_TASK:> Description: def issue_section(issue): """Returns the section heading for the issue, or None if this issue should be ignored."""
labels = issue.get('labels', []) for label in labels: if not label['name'].startswith('type: '): continue if label['name'] in LOG_SECTION: return LOG_SECTION[label['name']] elif label['name'] in IGNORE_ISSUE_TYPE: return None else: logging.warning('unknown issue type: "{}" for: {}'.format(label['name'], issue_line(issue))) return None
<SYSTEM_TASK:> Returns list of tags for this issue. <END_TASK> <USER_TASK:> Description: def issue_tags(issue): """Returns list of tags for this issue."""
labels = issue.get('labels', []) return [label['name'].replace('tag: ', '') for label in labels if label['name'].startswith('tag: ')]
<SYSTEM_TASK:> Returns True iff this issue was closed after given date. If after not given, only checks if issue is closed. <END_TASK> <USER_TASK:> Description: def closed_issue(issue, after=None): """Returns True iff this issue was closed after given date. If after not given, only checks if issue is closed."""
if issue['state'] == 'closed': if after is None or parse_timestamp(issue['closed_at']) > after: return True return False
<SYSTEM_TASK:> Returns True iff this issue is something we should show in the changelog. <END_TASK> <USER_TASK:> Description: def relevent_issue(issue, after): """Returns True iff this issue is something we should show in the changelog."""
return (closed_issue(issue, after) and issue_completed(issue) and issue_section(issue))
<SYSTEM_TASK:> Yields unique set of issues given a list of issues. <END_TASK> <USER_TASK:> Description: def all_issues(issues): """Yields unique set of issues given a list of issues."""
logging.info('finding issues...') seen = set() for issue in issues: if issue['title'] not in seen: seen.add(issue['title']) yield issue
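A quick usage sketch, assuming `all_issues` above (the issue dicts are illustrative):

```python
issues = [
    {"title": "Fix crash on startup", "labels": []},
    {"title": "Fix crash on startup", "labels": []},  # Duplicate title.
    {"title": "Add changelog tooling", "labels": []},
]
print([issue["title"] for issue in all_issues(issues)])
# ['Fix crash on startup', 'Add changelog tooling']
```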