<SYSTEM_TASK:> Returns a list of SplitInfo protos that we have. <END_TASK> <USER_TASK:> Description: def to_proto(self): """Returns a list of SplitInfo protos that we have."""
# Return the proto.SplitInfo, sorted by name return sorted((s.get_proto() for s in self.values()), key=lambda s: s.name)
<SYSTEM_TASK:> Returns a decorator which prevents concurrent calls to functions. <END_TASK> <USER_TASK:> Description: def build_synchronize_decorator(): """Returns a decorator which prevents concurrent calls to functions. Usage: synchronized = build_synchronize_decorator() @synchronized def read_value(): ... @synchronized def write_value(x): ... Returns: make_threadsafe (fct): The decorator which lock all functions to which it is applied under a same lock """
lock = threading.Lock() def lock_decorator(fn): @functools.wraps(fn) def lock_decorated(*args, **kwargs): with lock: return fn(*args, **kwargs) return lock_decorated return lock_decorator
<SYSTEM_TASK:> Returns file name of file at given url. <END_TASK> <USER_TASK:> Description: def get_file_name(url): """Returns file name of file at given url."""
return os.path.basename(urllib.parse.urlparse(url).path) or 'unknown_name'
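A quick sanity check of the helper above (self-contained copy of the function; the URLs are hypothetical):
```python
import os
import urllib.parse

def get_file_name(url):
    """Returns file name of file at given url."""
    return os.path.basename(urllib.parse.urlparse(url).path) or 'unknown_name'

print(get_file_name("https://example.com/files/archive.tar.gz?dl=1"))  # archive.tar.gz
print(get_file_name("https://example.com/"))                           # unknown_name
```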
<SYSTEM_TASK:> Make built-in Librispeech BuilderConfigs. <END_TASK> <USER_TASK:> Description: def _make_builder_configs(): """Make built-in Librispeech BuilderConfigs. Uses 4 text encodings (plain text, bytes, subwords with 8k vocab, subwords with 32k vocab) crossed with the data subsets (clean100, clean360, all). Returns: `list<tfds.audio.LibrispeechConfig>` """
text_encoder_configs = [ None, tfds.features.text.TextEncoderConfig( name="bytes", encoder=tfds.features.text.ByteTextEncoder()), tfds.features.text.TextEncoderConfig( name="subwords8k", encoder_cls=tfds.features.text.SubwordTextEncoder, vocab_size=2**13), tfds.features.text.TextEncoderConfig( name="subwords32k", encoder_cls=tfds.features.text.SubwordTextEncoder, vocab_size=2**15), ] version = "0.1.0" configs = [] for text_encoder_config in text_encoder_configs: for data in _DATA_OPTIONS: config = LibrispeechConfig( version=version, text_encoder_config=text_encoder_config, data=data) configs.append(config) return configs
<SYSTEM_TASK:> Walk a Librispeech directory and yield examples. <END_TASK> <USER_TASK:> Description: def _walk_librispeech_dir(directory): """Walk a Librispeech directory and yield examples."""
directory = os.path.join(directory, "LibriSpeech") for path, _, files in tf.io.gfile.walk(directory): if not files: continue transcript_file = [f for f in files if f.endswith(".txt")] if not transcript_file: continue assert len(transcript_file) == 1 transcript_file, = transcript_file transcripts = {} with tf.io.gfile.GFile(os.path.join(path, transcript_file)) as f: for line in f: line = line.strip() key, transcript = line.split(" ", 1) transcripts[key] = transcript audio_files = [f for f in files if not f.endswith(".txt")] for audio_file in audio_files: assert audio_file.endswith(".flac") key = audio_file[:-len(".flac")] transcript = transcripts[key] speaker_id, chapter_id = [int(el) for el in key.split("-")[:2]] yield LibrispeechExample( speaker_id=speaker_id, chapter_id=chapter_id, audio_file=os.path.join(path, audio_file), transcript=transcript)
<SYSTEM_TASK:> Returns download urls for this config. <END_TASK> <USER_TASK:> Description: def download_urls(self): """Returns download urls for this config."""
urls = { tfds.Split.TRAIN: ["train_clean100"], tfds.Split.VALIDATION: ["dev_clean"], tfds.Split.TEST: ["test_clean"], } if self.data in ["all", "clean360"]: urls[tfds.Split.TRAIN].append("train_clean360") if self.data == "all": urls[tfds.Split.TRAIN].append("train_other500") urls[tfds.Split.VALIDATION].append("dev_other") urls[tfds.Split.TEST].append("test_other") urls = { split: [_DL_URLS[name] for name in names] for split, names in urls.items() } return urls
<SYSTEM_TASK:> Conversion class name string => integer. <END_TASK> <USER_TASK:> Description: def str2int(self, str_value): """Conversion class name string => integer."""
str_value = tf.compat.as_text(str_value) if self._str2int: return self._str2int[str_value] # No names provided, try to integerize failed_parse = False try: int_value = int(str_value) except ValueError: failed_parse = True if failed_parse or not 0 <= int_value < self._num_classes: raise ValueError("Invalid string class label %s" % str_value) return int_value
<SYSTEM_TASK:> Conversion integer => class name string. <END_TASK> <USER_TASK:> Description: def int2str(self, int_value): """Conversion integer => class name string."""
if self._int2str: # Maybe should support batched np array/eager tensors, to allow things # like # out_ids = model(inputs) # labels = cifar10.info.features['label'].int2str(out_ids) return self._int2str[int_value] # No names provided, return str(int) if not 0 <= int_value < self._num_classes: raise ValueError("Invalid integer class label %d" % int_value) return tf.compat.as_text(str(int_value))
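For context, a minimal usage sketch of the two conversions via `tfds.features.ClassLabel` (assuming a `tensorflow_datasets` version exposing that feature; the label names are made up):
```python
import tensorflow_datasets as tfds

# With explicit names, str2int/int2str look up the provided vocabulary.
label = tfds.features.ClassLabel(names=["airplane", "automobile", "bird"])
assert label.str2int("bird") == 2
assert label.int2str(0) == "airplane"

# Without names, integers simply round-trip through their string form.
anon = tfds.features.ClassLabel(num_classes=5)
assert anon.str2int("3") == 3
assert anon.int2str(3) == "3"
```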
<SYSTEM_TASK:> Prepare tokens for encoding. <END_TASK> <USER_TASK:> Description: def _prepare_tokens_for_encode(tokens): """Prepare tokens for encoding. Tokens followed by a single space have "_" appended and the single space token is dropped. If a token is _UNDERSCORE_REPLACEMENT, it is broken up into 2 tokens. Args: tokens: `list<str>`, tokens to prepare. Returns: `list<str>` prepared tokens. """
prepared_tokens = [] def _prepare_token(t, next_t): skip_next = False t = _escape(t) # If next token is a single space, add _ suffix to token and skip the # empty space. if next_t == " ": t += "_" skip_next = True return t, skip_next next_tokens = tokens[1:] + [None] skip_single_token = False for token, next_token in zip(tokens, next_tokens): if skip_single_token: skip_single_token = False continue # If the user-supplied string contains the underscore replacement string, # break it into 2 tokens and encode those separately. if token == _UNDERSCORE_REPLACEMENT: t1, t2 = _UNDERSCORE_REPLACEMENT[:2], _UNDERSCORE_REPLACEMENT[2:] t1, _ = _prepare_token(t1, None) t2, _ = _prepare_token(t2, next_token) prepared_tokens.append(t1) prepared_tokens.append(t2) continue token, skip_single_token = _prepare_token(token, next_token) prepared_tokens.append(token) return prepared_tokens
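As a minimal illustration of the space-merging rule described above (a simplified re-implementation for clarity; it ignores escaping and the `_UNDERSCORE_REPLACEMENT` special case):
```python
def merge_trailing_spaces(tokens):
    """Toy version: a token followed by a single-space token gets a "_" suffix."""
    out, skip = [], False
    for tok, nxt in zip(tokens, tokens[1:] + [None]):
        if skip:  # the single space was consumed by the previous token
            skip = False
            continue
        if nxt == " ":
            tok, skip = tok + "_", True
        out.append(tok)
    return out

assert merge_trailing_spaces(["hello", " ", "world"]) == ["hello_", "world"]
```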
<SYSTEM_TASK:> Encodes text into a list of integers. <END_TASK> <USER_TASK:> Description: def encode(self, s): """Encodes text into a list of integers."""
s = tf.compat.as_text(s) tokens = self._tokenizer.tokenize(s) tokens = _prepare_tokens_for_encode(tokens) ids = [] for token in tokens: ids.extend(self._token_to_ids(token)) return text_encoder.pad_incr(ids)
<SYSTEM_TASK:> Decodes a list of integers into text. <END_TASK> <USER_TASK:> Description: def decode(self, ids): """Decodes a list of integers into text."""
ids = text_encoder.pad_decr(ids) subword_ids = ids del ids subwords = [] # Some ids correspond to bytes. Because unicode characters are composed of # possibly multiple bytes, we attempt to decode contiguous lists of bytes # all together. Invalid byte sequences are replaced with the unicode # replacement (i.e. unknown) character U+FFFD. prev_bytes = [] def consume_prev_bytes(): if prev_bytes: bytestr = b"".join(prev_bytes) bytes_text = bytestr.decode("utf-8", "replace") subwords.append(bytes_text) return [] for subword_id in subword_ids: subword = self._id_to_subword(subword_id) if isinstance(subword, six.binary_type): # Byte-encoded prev_bytes.append(subword) else: # If there were bytes previously, convert to unicode. prev_bytes = consume_prev_bytes() trimmed, add_space = _trim_underscore_and_tell(subword) subwords.append(trimmed) if add_space: subwords.append(" ") # If there were trailing bytes, convert to unicode. prev_bytes = consume_prev_bytes() return tf.compat.as_text("".join(subwords))
<SYSTEM_TASK:> Convert a single token to a list of integer ids. <END_TASK> <USER_TASK:> Description: def _token_to_ids(self, token): """Convert a single token to a list of integer ids."""
# Check cache cache_location = hash(token) % self._cache_size cache_key, cache_value = self._token_to_ids_cache[cache_location] if cache_key == token: return cache_value subwords = self._token_to_subwords(token) ids = [] for subword in subwords: if subword == _UNDERSCORE_REPLACEMENT: ids.append(len(self._subwords) + ord("_")) continue subword_id = self._subword_to_id.get(subword) if subword_id is None: # Byte-encode ids.extend(self._byte_encode(subword)) else: ids.append(subword_id) # Update cache self._token_to_ids_cache[cache_location] = (token, ids) return ids
<SYSTEM_TASK:> Encode a single token byte-wise into integer ids. <END_TASK> <USER_TASK:> Description: def _byte_encode(self, token): """Encode a single token byte-wise into integer ids."""
# Vocab ids for all bytes follow ids for the subwords offset = len(self._subwords) if token == "_": return [len(self._subwords) + ord(" ")] return [i + offset for i in list(bytearray(tf.compat.as_bytes(token)))]
<SYSTEM_TASK:> Initializes the encoder from a list of subwords. <END_TASK> <USER_TASK:> Description: def _init_from_list(self, subwords): """Initializes the encoder from a list of subwords."""
subwords = [tf.compat.as_text(s) for s in subwords if s] self._subwords = subwords # Note that internally everything is 0-indexed. Padding is dealt with at the # end of encode and the beginning of decode. self._subword_to_id = {s: i for i, s in enumerate(subwords)} # We remember the maximum length of any subword to avoid having to # check arbitrarily long strings. self._max_subword_len = max( len(_UNDERSCORE_REPLACEMENT), max([len(s) for s in subwords] or [1])) # Initialize the cache self._cache_size = 2**20 self._token_to_ids_cache = [(None, None)] * self._cache_size # Setup tokenizer # Reserved tokens are all tokens that are mixed alphanum and non-alphanum. reserved_tokens = set([_UNDERSCORE_REPLACEMENT]) for t in self._subwords: if text_encoder.is_mixed_alphanum(t): reserved_tokens.add(t) self._tokenizer = text_encoder.Tokenizer( alphanum_only=False, reserved_tokens=reserved_tokens)
<SYSTEM_TASK:> Save the vocabulary to a file. <END_TASK> <USER_TASK:> Description: def save_to_file(self, filename_prefix): """Save the vocabulary to a file."""
# Wrap in single quotes to make it easier to see the full subword when # it has spaces and make it easier to search with ctrl+f. filename = self._filename(filename_prefix) lines = ["'%s'" % s for s in self._subwords] self._write_lines_to_file(filename, lines)
<SYSTEM_TASK:> Extracts list of subwords from file. <END_TASK> <USER_TASK:> Description: def load_from_file(cls, filename_prefix): """Extracts list of subwords from file."""
filename = cls._filename(filename_prefix) lines, _ = cls._read_lines_from_file(filename) # Strip wrapping single quotes vocab_list = [line[1:-1] for line in lines] return cls(vocab_list=vocab_list)
<SYSTEM_TASK:> Builds a `SubwordTextEncoder` based on the `corpus_generator`. <END_TASK> <USER_TASK:> Description: def build_from_corpus(cls, corpus_generator, target_vocab_size, max_subword_length=20, max_corpus_chars=None, reserved_tokens=None): """Builds a `SubwordTextEncoder` based on the `corpus_generator`. Args: corpus_generator: generator yielding `str`, from which subwords will be constructed. target_vocab_size: `int`, approximate size of the vocabulary to create. max_subword_length: `int`, maximum length of a subword. Note that memory and compute scale quadratically in the length of the longest token. max_corpus_chars: `int`, the maximum number of characters to consume from `corpus_generator` for the purposes of building the subword vocabulary. reserved_tokens: `list<str>`, list of tokens that will always be treated as whole tokens and not split up. Note that these must contain a mix of alphanumeric and non-alphanumeric characters (e.g. "<EOS>") and not end in an underscore. Returns: `SubwordTextEncoder`. """
reserved_tokens = reserved_tokens or [] _validate_build_arguments( max_subword_length=max_subword_length, reserved_tokens=reserved_tokens, target_vocab_size=target_vocab_size) token_counts = _token_counts_from_generator( generator=corpus_generator, max_chars=max_corpus_chars, reserved_tokens=reserved_tokens) # Binary search on the minimum token count to build a vocabulary with # approximately the right size def _binary_search(min_token_count, max_token_count): """Binary search min_token_count to build SubwordTextEncoder vocab.""" candidate_min = (min_token_count + max_token_count) // 2 logging.info("SubwordTextEncoder build: trying min_token_count %d", candidate_min) encoder = cls._build_from_token_counts( token_counts=token_counts, min_token_count=candidate_min, reserved_tokens=reserved_tokens, num_iterations=4, max_subword_length=max_subword_length) vocab_size = encoder.vocab_size # Being within 1% of the target vocab size is ok target_achieved = ( abs(vocab_size - target_vocab_size) * 100 < target_vocab_size) if (target_achieved or min_token_count >= max_token_count or candidate_min <= 1): # Search complete return encoder # Recurse if vocab_size > target_vocab_size: next_encoder = _binary_search(candidate_min + 1, max_token_count) else: next_encoder = _binary_search(min_token_count, candidate_min - 1) # Return the one that's closest to the target_vocab_size if (abs(vocab_size - target_vocab_size) < abs(next_encoder.vocab_size - target_vocab_size)): return encoder else: return next_encoder # Get min and max token counts. min_token_count = max(min(token_counts.values()), 1) max_token_count = max(token_counts.values()) # Another option could be to do a binary search over *ranks* of the tokens. return _binary_search(min_token_count, max_token_count)
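A hedged usage sketch of the builder (assumes an older `tensorflow_datasets` release where `tfds.features.text.SubwordTextEncoder` is still exposed; newer releases moved it under `tfds.deprecated.text`):
```python
import tensorflow_datasets as tfds

corpus = (line for line in ["the quick brown fox", "jumps over the lazy dog"])

# Build a small subword vocabulary; the target size is approximate by design.
encoder = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    corpus_generator=corpus, target_vocab_size=100)

ids = encoder.encode("the quick dog")
assert encoder.decode(ids) == "the quick dog"  # encoding is lossless
```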
<SYSTEM_TASK:> Generate features given the directory path. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, file_path): """Generate features given the directory path. Args: file_path: path where the csv file is stored Yields: The features, per row. """
fieldnames = [ 'class_label', 'lepton_pT', 'lepton_eta', 'lepton_phi', 'missing_energy_magnitude', 'missing_energy_phi', 'jet_1_pt', 'jet_1_eta', 'jet_1_phi', 'jet_1_b-tag', 'jet_2_pt', 'jet_2_eta', 'jet_2_phi', 'jet_2_b-tag', 'jet_3_pt', 'jet_3_eta', 'jet_3_phi', 'jet_3_b-tag', 'jet_4_pt', 'jet_4_eta', 'jet_4_phi', 'jet_4_b-tag', 'm_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb' ] with tf.io.gfile.GFile(file_path) as csvfile: reader = csv.DictReader(csvfile, fieldnames=fieldnames) for row in reader: yield row
<SYSTEM_TASK:> Generate Cats vs Dogs images and labels given a directory path. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, archive): """Generate Cats vs Dogs images and labels given a directory path."""
num_skipped = 0 for fname, fobj in archive: res = _NAME_RE.match(fname) if not res: # README file, ... continue label = res.group(1).lower() if tf.compat.as_bytes("JFIF") not in fobj.peek(10): num_skipped += 1 continue yield { "image": fobj, "image/filename": fname, "label": label, } if num_skipped != _NUM_CORRUPT_IMAGES: raise ValueError("Expected %d corrupt images, but found %d" % ( _NUM_CORRUPT_IMAGES, num_skipped)) logging.warning("%d images were corrupted and were skipped", num_skipped)
<SYSTEM_TASK:> Loads a data chunk as specified by the paths. <END_TASK> <USER_TASK:> Description: def _load_chunk(dat_path, cat_path, info_path): """Loads a data chunk as specified by the paths. Args: dat_path: Path to dat file of the chunk. cat_path: Path to cat file of the chunk. info_path: Path to info file of the chunk. Returns: Tuple with the dat, cat, info_arrays. """
dat_array = read_binary_matrix(dat_path) # Even if the image is gray scale, we need to add an extra channel dimension # to be compatible with tfds.features.Image. dat_array = np.expand_dims(dat_array, -1) cat_array = read_binary_matrix(cat_path) info_array = read_binary_matrix(info_path) info_array = np.copy(info_array) # Make read-only buffer array writable. # Azimuth values are 0, 2, 4, .., 34. We divide by 2 to get proper labels. info_array[:, 2] = info_array[:, 2] / 2 return dat_array, cat_array, info_array
<SYSTEM_TASK:> Reads and returns binary formatted matrix stored in filename. <END_TASK> <USER_TASK:> Description: def read_binary_matrix(filename): """Reads and returns binary formatted matrix stored in filename. The file format is described on the data set page: https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/ Args: filename: String with path to the file. Returns: Numpy array contained in the file. """
with tf.io.gfile.GFile(filename, "rb") as f: s = f.read() # Data is stored in little-endian byte order. int32_dtype = np.dtype("int32").newbyteorder("<") # The first 4 bytes contain a magic code that specifies the data type. magic = int(np.frombuffer(s, dtype=int32_dtype, count=1)) if magic == 507333717: data_dtype = np.dtype("uint8") # uint8 does not have a byte order. elif magic == 507333716: data_dtype = np.dtype("int32").newbyteorder("<") else: raise ValueError("Invalid magic value for data type!") # The second 4 bytes contain an int32 with the number of dimensions of the # stored array. ndim = int(np.frombuffer(s, dtype=int32_dtype, count=1, offset=4)) # The next ndim x 4 bytes contain the shape of the array in int32. dims = np.frombuffer(s, dtype=int32_dtype, count=ndim, offset=8) # If the array has less than three dimensions, three int32 are still used to # save the shape info (remaining int32 are simply set to 1). The shape info # hence uses max(3, ndim) * 4 bytes. bytes_used_for_shape_info = max(3, ndim) * 4 # The remaining bytes are the array. data = np.frombuffer( s, dtype=data_dtype, offset=8 + bytes_used_for_shape_info) return data.reshape(tuple(dims))
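To make the header arithmetic concrete, here is a toy round-trip using the reader above, under the layout it assumes (magic code, ndim, then max(3, ndim) little-endian int32 dims, followed by the raw array bytes); the magic value 507333717 marks uint8 data:
```python
import os
import tempfile
import numpy as np

arr = np.arange(6, dtype=np.uint8).reshape(2, 3)
# Header: magic (uint8 data), ndim=2, dims=(2, 3) padded with a trailing 1.
header = np.array([507333717, 2, 2, 3, 1], dtype="<i4").tobytes()

path = os.path.join(tempfile.mkdtemp(), "toy.mat")
with open(path, "wb") as f:
    f.write(header + arr.tobytes())

np.testing.assert_array_equal(read_binary_matrix(path), arr)
```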
<SYSTEM_TASK:> Generate examples for the Smallnorb dataset. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, dat_path, cat_path, info_path): """Generate examples for the Smallnorb dataset. Args: dat_path: Path to dat file of the chunk. cat_path: Path to cat file of the chunk. info_path: Path to info file of the chunk. Yields: Dictionaries with images and the different labels. """
dat_arr, cat_arr, info_arr = _load_chunk(dat_path, cat_path, info_path) for image, category, info_vec in moves.zip(dat_arr, cat_arr, info_arr): yield { "image": image[0], "image2": image[1], "label_category": category, "instance": info_vec[0], "label_elevation": info_vec[1], "label_azimuth": info_vec[2], "label_lighting": info_vec[3], }
<SYSTEM_TASK:> Constructs a `tf.data.Dataset` from TFRecord files. <END_TASK> <USER_TASK:> Description: def build_dataset(instruction_dicts, dataset_from_file_fn, shuffle_files=False, parallel_reads=64): """Constructs a `tf.data.Dataset` from TFRecord files. Args: instruction_dicts: `list` of {'filepath':, 'mask':, 'offset_mask':} containing the information about which files and which examples to use. The boolean mask will be repeated and zipped with the examples from filepath. dataset_from_file_fn: function returning a `tf.data.Dataset` given a filename. shuffle_files: `bool`, Whether to shuffle the input filenames. parallel_reads: `int`, how many files to read in parallel. Returns: `tf.data.Dataset` """
# First case: All examples are taken (No value skipped) if _no_examples_skipped(instruction_dicts): # Only use the filenames as instruction instruction_ds = tf.data.Dataset.from_tensor_slices([ d["filepath"] for d in instruction_dicts ]) build_ds_from_instruction = dataset_from_file_fn # Second case: Use the instructions to read the examples else: instruction_ds = _build_instruction_ds(instruction_dicts) build_ds_from_instruction = functools.partial( _build_ds_from_instruction, ds_from_file_fn=dataset_from_file_fn, ) # If shuffle is True, we shuffle the instructions/shards if shuffle_files: instruction_ds = instruction_ds.shuffle(len(instruction_dicts)) # Use interleave to parallel read files and decode records ds = instruction_ds.interleave( build_ds_from_instruction, cycle_length=parallel_reads, num_parallel_calls=tf.data.experimental.AUTOTUNE) return ds
<SYSTEM_TASK:> Create a dataset containing individual instruction for each shard. <END_TASK> <USER_TASK:> Description: def _build_instruction_ds(instructions): """Create a dataset containing individual instruction for each shard. Each instruction is a dict: ``` { "filepath": tf.Tensor(shape=(), dtype=tf.string), "mask_offset": tf.Tensor(shape=(), dtype=tf.int64), "mask": tf.Tensor(shape=(100,), dtype=tf.bool), } ``` Args: instructions: `list[dict]`, the list of instruction dict Returns: instruction_ds: The dataset containing the instruction. The dataset size is the number of shard. """
# Transpose the list[dict] into dict[list] tensor_inputs = { # offset_mask need to be converted to int64 explicitly k: np.array(vals, dtype=np.int64) if k == "mask_offset" else list(vals) for k, vals in utils.zip_dict(*instructions) } return tf.data.Dataset.from_tensor_slices(tensor_inputs)
<SYSTEM_TASK:> Build the mask dataset to indicate which element to skip. <END_TASK> <USER_TASK:> Description: def _build_mask_ds(mask, mask_offset): """Build the mask dataset to indicate which element to skip. Args: mask: `tf.Tensor`, binary mask to apply to all following elements. This mask should have a length 100. mask_offset: `tf.Tensor`, Integer specifying from how much the mask should be shifted for the first element. Returns: mask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip and True for examples to keep. """
mask_ds = tf.data.Dataset.from_tensor_slices(mask) mask_ds = mask_ds.repeat() mask_ds = mask_ds.skip(mask_offset) return mask_ds
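The repeat/skip mechanics can be reproduced with plain `tf.data` (standalone illustration, not tied to the private helper; TF 2.x eager mode assumed):
```python
import tensorflow as tf

mask = tf.constant([True, False, True])  # keep 2 of every 3 examples
mask_ds = tf.data.Dataset.from_tensor_slices(mask).repeat().skip(1)  # offset of 1

examples = tf.data.Dataset.range(6)
kept = (tf.data.Dataset.zip((examples, mask_ds))
        .filter(lambda ex, keep: keep)
        .map(lambda ex, keep: ex))

print(list(kept.as_numpy_iterator()))  # [1, 2, 4, 5]
```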
<SYSTEM_TASK:> Map an instruction to a real datasets for one particular shard. <END_TASK> <USER_TASK:> Description: def _build_ds_from_instruction(instruction, ds_from_file_fn): """Map an instruction to a real datasets for one particular shard. Args: instruction: A `dict` of `tf.Tensor` containing the instruction to load the particular shard (filename, mask,...) ds_from_file_fn: `fct`, function which returns the dataset associated to the filename Returns: dataset: `tf.data.Dataset`, The shard loaded from the instruction """
# Create the example and mask ds for this particular shard examples_ds = ds_from_file_fn(instruction["filepath"]) mask_ds = _build_mask_ds( mask_offset=instruction["mask_offset"], mask=instruction["mask"], ) # Zip the mask and real examples ds = tf.data.Dataset.zip((examples_ds, mask_ds)) # Filter according to the mask (only keep True) ds = ds.filter(lambda example, mask: mask) # Only keep the examples ds = ds.map(lambda example, mask: example) return ds
<SYSTEM_TASK:> Converts a `tf.data.Dataset` to an iterable of NumPy arrays. <END_TASK> <USER_TASK:> Description: def as_numpy(dataset, graph=None): """Converts a `tf.data.Dataset` to an iterable of NumPy arrays. `as_numpy` converts a possibly nested structure of `tf.data.Dataset`s and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively. Args: dataset: a possibly nested structure of `tf.data.Dataset`s and/or `tf.Tensor`s. graph: `tf.Graph`, optional, explicitly set the graph to use. Returns: A structure matching `dataset` where `tf.data.Dataset`s are converted to generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays. """
nested_ds = dataset del dataset # Flatten flat_ds = tf.nest.flatten(nested_ds) flat_np = [] # Type check for Tensors and Datasets for ds_el in flat_ds: types = [type(el) for el in flat_ds] types = tf.nest.pack_sequence_as(nested_ds, types) if not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el)): raise ValueError("Arguments to as_numpy must be tf.Tensors or " "tf.data.Datasets. Got: %s" % types) if tf.executing_eagerly(): # Eager mode for ds_el in flat_ds: if isinstance(ds_el, tf.Tensor): np_el = ds_el.numpy() elif tf_compat.is_dataset(ds_el): np_el = _eager_dataset_iterator(ds_el) else: assert False flat_np.append(np_el) else: # Graph mode # First create iterators for datasets with utils.maybe_with_graph(graph, create_if_none=False): ds_iters = [ tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next() for ds_el in flat_ds if tf_compat.is_dataset(ds_el) ] ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters] # Then create numpy arrays for tensors with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor # Calling sess.run once so that randomness is shared. np_arrays = sess.run([tensor for tensor in flat_ds if not tf_compat.is_dataset(tensor)]) # Merge the dataset iterators and np arrays iter_ds = iter(ds_iters) iter_array = iter(np_arrays) flat_np = [ next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array) for ds_el in flat_ds ] # Nest return tf.nest.pack_sequence_as(nested_ds, flat_np)
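`tfds.as_numpy` is the public entry point for this conversion; a minimal eager-mode example:
```python
import tensorflow as tf
import tensorflow_datasets as tfds

ds = tf.data.Dataset.from_tensor_slices({"x": [1, 2, 3]})
for example in tfds.as_numpy(ds):
    print(example["x"])  # plain NumPy scalars: 1, 2, 3
```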
<SYSTEM_TASK:> Loads the images and latent values into Numpy arrays. <END_TASK> <USER_TASK:> Description: def _load_data(filepath): """Loads the images and latent values into Numpy arrays."""
with h5py.File(filepath, "r") as h5dataset: image_array = np.array(h5dataset["images"]) # The 'label' data set in the hdf5 file actually contains the float values # and not the class labels. values_array = np.array(h5dataset["labels"]) return image_array, values_array
<SYSTEM_TASK:> Generate examples for the Shapes3d dataset. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, filepath): """Generate examples for the Shapes3d dataset. Args: filepath: path to the Shapes3d hdf5 file. Yields: Dictionaries with images and the different labels. """
# Simultaneously iterating through the different data sets in the hdf5 # file will be slow with a single file. Instead, we first load everything # into memory before yielding the samples. image_array, values_array = _load_data(filepath) # We need to calculate the class labels from the float values in the file. labels_array = np.zeros_like(values_array, dtype=np.int64) for i in range(values_array.shape[1]): labels_array[:, i] = _discretize(values_array[:, i]) # pylint: disable=unsupported-assignment-operation for image, labels, values in moves.zip(image_array, labels_array, values_array): yield { "image": image, "label_floor_hue": labels[0], "label_wall_hue": labels[1], "label_object_hue": labels[2], "label_scale": labels[3], "label_shape": labels[4], "label_orientation": labels[5], "value_floor_hue": values[0], "value_wall_hue": values[1], "value_object_hue": values[2], "value_scale": values[3], "value_shape": values[4], "value_orientation": values[5], }
<SYSTEM_TASK:> Strips formatting and unwanted sections from raw page content. <END_TASK> <USER_TASK:> Description: def _parse_and_clean_wikicode(raw_content): """Strips formatting and unwanted sections from raw page content."""
wikicode = tfds.core.lazy_imports.mwparserfromhell.parse(raw_content) # Filters for references, tables, and file/image links. re_rm_wikilink = re.compile( "^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE) def rm_wikilink(obj): return bool(re_rm_wikilink.match(six.text_type(obj.title))) def rm_tag(obj): return six.text_type(obj.tag) in {"ref", "table"} def rm_template(obj): return obj.name.lower() in { "reflist", "notelist", "notelist-ua", "notelist-lr", "notelist-ur", "notelist-lg"} def try_remove_obj(obj, section): try: section.remove(obj) except ValueError: # For unknown reasons, objects are sometimes not found. pass section_text = [] # Filter individual sections to clean. for section in wikicode.get_sections( flat=True, include_lead=True, include_headings=True): for obj in section.ifilter_wikilinks(matches=rm_wikilink, recursive=True): try_remove_obj(obj, section) for obj in section.ifilter_templates(matches=rm_template, recursive=True): try_remove_obj(obj, section) for obj in section.ifilter_tags(matches=rm_tag, recursive=True): try_remove_obj(obj, section) section_text.append(section.strip_code().strip()) return "\n\n".join(section_text)
<SYSTEM_TASK:> Generate data for a given dataset. <END_TASK> <USER_TASK:> Description: def download_and_prepare(builder): """Generate data for a given dataset."""
print("download_and_prepare for dataset {}...".format(builder.info.full_name)) dl_config = download_config() if isinstance(builder, tfds.core.BeamBasedBuilder): beam = tfds.core.lazy_imports.apache_beam # TODO(b/129149715): Restore compute stats. Currently skipped because not # beam supported. dl_config.compute_stats = tfds.download.ComputeStatsMode.SKIP dl_config.beam_options = beam.options.pipeline_options.PipelineOptions() builder.download_and_prepare( download_dir=FLAGS.download_dir, download_config=dl_config, ) termcolor.cprint(str(builder.info.as_proto), attrs=["bold"]) if FLAGS.debug: dataset = builder.as_dataset(split=tfds.Split.TRAIN) pdb.set_trace() del dataset
<SYSTEM_TASK:> Generate CIFAR examples as dicts. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, filepaths): """Generate CIFAR examples as dicts. Shared across CIFAR-{10, 100}. Uses self._cifar_info as configuration. Args: filepaths (list[str]): The files to use to generate the data. Yields: The cifar examples, as defined in the dataset info features. """
label_keys = self._cifar_info.label_keys for path in filepaths: for labels, np_image in _load_data(path, len(label_keys)): row = dict(zip(label_keys, labels)) row["image"] = np_image yield row
<SYSTEM_TASK:> Requires function to be called using keyword arguments. <END_TASK> <USER_TASK:> Description: def disallow_positional_args(wrapped=None, allowed=None): """Requires function to be called using keyword arguments."""
# See # https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments # for decorator pattern. if wrapped is None: return functools.partial(disallow_positional_args, allowed=allowed) @wrapt.decorator def disallow_positional_args_dec(fn, instance, args, kwargs): ismethod = instance is not None _check_no_positional(fn, args, ismethod, allowed=allowed) _check_required(fn, kwargs) return fn(*args, **kwargs) return disallow_positional_args_dec(wrapped)
<SYSTEM_TASK:> Download a file from GCS, optionally to a file. <END_TASK> <USER_TASK:> Description: def download_gcs_file(path, out_fname=None, prefix_filter=None): """Download a file from GCS, optionally to a file."""
url = posixpath.join(GCS_BUCKET, path) if prefix_filter: url += "?prefix=%s" % prefix_filter stream = bool(out_fname) resp = requests.get(url, stream=stream) if not resp.ok: raise ValueError("GCS bucket inaccessible") if out_fname: with tf.io.gfile.GFile(out_fname, "wb") as f: for chunk in resp.iter_content(1024): f.write(chunk) else: return resp.content
<SYSTEM_TASK:> Run kaggle command with subprocess. <END_TASK> <USER_TASK:> Description: def _run_kaggle_command(command_args, competition_name): """Run kaggle command with subprocess."""
try: output = sp.check_output(command_args) return tf.compat.as_text(output) except sp.CalledProcessError as err: output = err.output _log_command_output(output, error=True) if output.startswith(b"404"): logging.error(_NOT_FOUND_ERR_MSG, competition_name) raise logging.error(_ERR_MSG, competition_name) raise
<SYSTEM_TASK:> List of competition files. <END_TASK> <USER_TASK:> Description: def competition_files(self): """List of competition files."""
command = [ "kaggle", "datasets" if "/" in self._competition_name else "competitions", "files", "-v", self._competition_name, ] output = _run_kaggle_command(command, self._competition_name) return sorted([ line.split(",")[0] for line in output.split("\n")[1:] if line ])
<SYSTEM_TASK:> Downloads competition file to output_dir. <END_TASK> <USER_TASK:> Description: def download_file(self, fname, output_dir): """Downloads competition file to output_dir."""
if fname not in self.competition_files: # pylint: disable=unsupported-membership-test raise ValueError("%s is not one of the competition's " "files: %s" % (fname, self.competition_files)) command = [ "kaggle", "competitions", "download", "--file", fname, "--path", output_dir, "-c", self._competition_name, ] _run_kaggle_command(command, self._competition_name) return os.path.join(output_dir, fname)
<SYSTEM_TASK:> Generate flower images and labels given the image directory path. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, images_dir_path): """Generate flower images and labels given the image directory path. Args: images_dir_path: path to the directory where the images are stored. Yields: The image path and its corresponding label. """
parent_dir = tf.io.gfile.listdir(images_dir_path)[0] walk_dir = os.path.join(images_dir_path, parent_dir) dirs = tf.io.gfile.listdir(walk_dir) for d in dirs: if tf.io.gfile.isdir(os.path.join(walk_dir, d)): for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)): for image_file in fname: if image_file.endswith(".jpg"): image_path = os.path.join(full_path, image_file) yield { "image": image_path, "label": d.lower(), }
<SYSTEM_TASK:> Returns path to where checksums are stored for a given dataset. <END_TASK> <USER_TASK:> Description: def _get_path(dataset_name): """Returns path to where checksums are stored for a given dataset."""
path = _checksum_paths().get(dataset_name, None) if path: return path msg = ('No checksums file could be found for dataset %s. Please create one in ' 'one of: %s') % (dataset_name, ', '.join(_CHECKSUM_DIRS)) raise AssertionError(msg)
<SYSTEM_TASK:> Store given checksums and sizes for a specific dataset. <END_TASK> <USER_TASK:> Description: def store_checksums(dataset_name, sizes_checksums): """Store given checksums and sizes for a specific dataset. The content of the file is never discarded, only updated. This ensures that if the process is killed right after the first download finishes, checksums registered during previous runs aren't lost. It is the responsibility of the caller not to call this function multiple times in parallel for a given dataset. Only the original file content is updated. This means the entire set of new sizes and checksums must be given at every call. Args: dataset_name: string. sizes_checksums: dict, {url: (size_in_bytes, checksum)}. """
path = _get_path(dataset_name) original_data = _get_sizes_checksums(path) new_data = original_data.copy() new_data.update(sizes_checksums) if original_data == new_data: return with tf.io.gfile.GFile(path, 'w') as f: for url, (size, checksum) in sorted(new_data.items()): f.write('%s %s %s\n' % (url, size, checksum))
<SYSTEM_TASK:> Sanitize and shorten url to fit in max_length. <END_TASK> <USER_TASK:> Description: def _sanitize_url(url, max_length): """Sanitize and shorten url to fit in max_length. The function is stable: the same input MUST ALWAYS give the same result, across changes in the code as well. Different URLs might give the same result. As much as possible, the extension should be kept. Heuristics are applied to only keep useful info from the url. 1- Drop generic [sub]domains. 'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...' 'storage.googleapis.com/foo/...' -> 'foo/...' 'drive.google.com/bar/...' -> 'bar/...' 'github.com/baz/...' -> 'baz/...' 2- Remove leading '0's from url components: 'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords' 3- Truncate each component of url until total size fits or each component is left with 4 chars (or total size is <= limit): 'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords' (here truncate components to 4 chars per component max) -> 'Move_64x6_png/trai-4-of-10.tfrecords' 4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def' Args: url: string, url to sanitize and shorten. max_length: int, max length of result. Returns: (string, string): sanitized and shortened url, file extension. """
url = urllib.parse.urlparse(url) netloc = url.netloc for prefix in _NETLOC_COMMON_PREFIXES: if netloc.startswith(prefix): netloc = netloc[len(prefix):] for suffix in _NETLOC_COMMON_SUFFIXES: if netloc.endswith(suffix): netloc = netloc[:-len(suffix)] url = '%s%s%s%s' % (netloc, url.path, url.params, url.query) # Get the extension: for ext in _KNOWN_EXTENSIONS: if url.endswith(ext): extension = ext url = url[:-len(extension)] break else: url, extension = os.path.splitext(url) max_length -= len(extension) # Replace non authorized chars (including '/') by '_': url = re.sub(r'[^a-zA-Z0-9\.\-_]+', '_', url) # Remove parts with no info: for common_part in _URL_COMMON_PARTS: url = url.replace(common_part, '_') url = url.strip('_') # Remove leading zeros in groups of numbers: url = re.sub('(?<![0-9])0+(?=[0-9])', '', url) # Decrease max size of URL components: c_size = max(len(c) for c in re.split(r'[\.\-_]', url)) while c_size > 4 and len(url) > max_length: c_size -= 1 url = re.sub(r'[^\.\-_]{4,}', lambda match: match.group(0)[:c_size], url) return url[:max_length], extension
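A hypothetical call of the private helper (the exact result depends on the module's `_NETLOC_COMMON_PREFIXES`, `_URL_COMMON_PARTS`, and `_KNOWN_EXTENSIONS` constants, so the output shown is only indicative):
```python
name, ext = _sanitize_url(
    "https://storage.googleapis.com/foo/train-00004-of-00010.tfrecords",
    max_length=50)
# Roughly: name == "foo_train-4-of-10", ext == ".tfrecords"
```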
<SYSTEM_TASK:> Returns name of temp dir for given url. <END_TASK> <USER_TASK:> Description: def get_dl_dirname(url): """Returns name of temp dir for given url."""
checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest() return get_dl_fname(url, checksum)
<SYSTEM_TASK:> Returns info dict or None. <END_TASK> <USER_TASK:> Description: def _read_info(info_path): """Returns info dict or None."""
if not tf.io.gfile.exists(info_path): return None with tf.io.gfile.GFile(info_path) as info_f: return json.load(info_f)
<SYSTEM_TASK:> Write the INFO file next to local file. <END_TASK> <USER_TASK:> Description: def write_info_file(resource, path, dataset_name, original_fname): """Write the INFO file next to local file. Although the method is synchronized, there is still a risk two processes running at the same time overlap here. Risk accepted, since potentially lost data (`dataset_name`) is only for human consumption. Args: resource: resource for which to write the INFO file. path: path of downloaded file. dataset_name: data used to dl the file. original_fname: name of file as downloaded. """
info_path = _get_info_path(path) info = _read_info(info_path) or {} urls = set(info.get('urls', []) + [resource.url]) dataset_names = info.get('dataset_names', []) if dataset_name: dataset_names.append(dataset_name) if 'original_fname' in info and info['original_fname'] != original_fname: raise AssertionError( '`original_fname` "%s" stored in %s does NOT match "%s".' % ( info['original_fname'], info_path, original_fname)) info = dict(urls=list(urls), dataset_names=list(set(dataset_names)), original_fname=original_fname) with py_utils.atomic_write(info_path, 'w') as info_f: json.dump(info, info_f, sort_keys=True)
<SYSTEM_TASK:> Returns whether the resource exists locally, at `resource.path`. <END_TASK> <USER_TASK:> Description: def exists_locally(cls, path): """Returns whether the resource exists locally, at `resource.path`."""
# If INFO file doesn't exist, consider resource does NOT exist, as it would # prevent guessing the `extract_method`. return (tf.io.gfile.exists(path) and tf.io.gfile.exists(_get_info_path(path)))
<SYSTEM_TASK:> Call SubwordTextEncoder.build_from_corpus if encoder_cls is such. <END_TASK> <USER_TASK:> Description: def maybe_build_from_corpus(self, corpus_generator, **kwargs): """Call SubwordTextEncoder.build_from_corpus if encoder_cls is such."""
if self._encoder_cls is not text_lib.SubwordTextEncoder: return if self.encoder: return vocab_size = self._encoder_config.vocab_size self.encoder = text_lib.SubwordTextEncoder.build_from_corpus( corpus_generator=corpus_generator, target_vocab_size=vocab_size, **kwargs)
<SYSTEM_TASK:> Sharded filenames given prefix and number of shards. <END_TASK> <USER_TASK:> Description: def sharded_filenames(filename_prefix, num_shards): """Sharded filenames given prefix and number of shards."""
shard_suffix = "%05d-of-%05d" return [ "%s-%s" % (filename_prefix, shard_suffix % (i, num_shards)) for i in range(num_shards) ]
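For example, the helper above yields zero-indexed, zero-padded shard names:
```python
sharded_filenames("mnist-train.tfrecord", num_shards=2)
# -> ['mnist-train.tfrecord-00000-of-00002',
#     'mnist-train.tfrecord-00001-of-00002']
```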
<SYSTEM_TASK:> Walk an Omniglot directory and yield examples. <END_TASK> <USER_TASK:> Description: def _walk_omniglot_dir(directory): """Walk an Omniglot directory and yield examples."""
directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0]) alphabets = sorted(tf.io.gfile.listdir(directory)) for alphabet in alphabets: alphabet_dir = os.path.join(directory, alphabet) characters = sorted(tf.io.gfile.listdir(alphabet_dir)) for character in characters: character_id = int(character[len("character"):]) - 1 character_dir = os.path.join(alphabet_dir, character) images = tf.io.gfile.listdir(character_dir) for image in images: label, _ = image.split("_") label = int(label) - 1 image_path = os.path.join(character_dir, image) yield alphabet, character_id, label, image_path
<SYSTEM_TASK:> Get alphabet and label names, union across all dirs. <END_TASK> <USER_TASK:> Description: def _get_names(dirs): """Get alphabet and label names, union across all dirs."""
alphabets = set() label_names = {} for d in dirs: for example in _walk_omniglot_dir(d): alphabet, alphabet_char_id, label, _ = example alphabets.add(alphabet) label_name = "%s_%d" % (alphabet, alphabet_char_id) if label in label_names: assert label_names[label] == label_name else: label_names[label] = label_name label_names = [label_names[k] for k in sorted(label_names)] return alphabets, label_names
<SYSTEM_TASK:> Returns a human readable size string. <END_TASK> <USER_TASK:> Description: def size_str(size_in_bytes): """Returns a human readable size string. If size_in_bytes is None, then returns "?? GiB". For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`. Args: size_in_bytes: `int` or `None`, the size, in bytes, that we want to format as a human-readable size string. """
if not size_in_bytes: return "?? GiB" size_in_bytes = float(size_in_bytes) for (name, size_bytes) in _NAME_LIST: value = size_in_bytes / size_bytes if value >= 1.0: return "{:.2f} {}".format(value, name) return "{} {}".format(int(size_in_bytes), "bytes")
<SYSTEM_TASK:> Add a progression bar for the current download. <END_TASK> <USER_TASK:> Description: def tqdm(self): """Add a progression bar for the current download."""
async_tqdm = utils.async_tqdm with async_tqdm(total=0, desc='Dl Completed...', unit=' url') as pbar_url: with async_tqdm(total=0, desc='Dl Size...', unit=' MiB') as pbar_dl_size: self._pbar_url = pbar_url self._pbar_dl_size = pbar_dl_size yield
<SYSTEM_TASK:> Download url to given path. <END_TASK> <USER_TASK:> Description: def download(self, url, destination_path): """Download url to given path. Returns Promise -> sha256 of downloaded file. Args: url: address of resource to download. destination_path: `str`, path to directory where to download the resource. Returns: Promise obj -> (`str`, int): (downloaded object checksum, size in bytes). """
self._pbar_url.update_total(1) future = self._executor.submit(self._sync_download, url, destination_path) return promise.Promise.resolve(future)
<SYSTEM_TASK:> Returns url, possibly with confirmation token. <END_TASK> <USER_TASK:> Description: def _get_drive_url(self, url, session): """Returns url, possibly with confirmation token."""
response = session.get(url, stream=True) if response.status_code != 200: raise DownloadError( 'Failed to get url %s. HTTP code: %d.' % (url, response.status_code)) for k, v in response.cookies.items(): if k.startswith('download_warning'): return url + '&confirm=' + v # v is the confirm token # No token found, let's try with original URL: return url
<SYSTEM_TASK:> Yields Example instances from given CSV. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, images_dir_path, csv_path=None, csv_usage=None): """Yields Example instances from given CSV. Args: images_dir_path: path to dir in which images are stored. csv_path: optional, path to csv file with two columns: name of image and label. If not provided, just scan image directory, don't set labels. csv_usage: optional, subset of examples from the csv file to use based on the "Usage" column from the csv. """
if csv_path: with tf.io.gfile.GFile(csv_path) as csv_f: reader = csv.DictReader(csv_f) data = [(row["image"], int(row["level"])) for row in reader if csv_usage is None or row["Usage"] == csv_usage] else: data = [(fname[:-5], -1) for fname in tf.io.gfile.listdir(images_dir_path) if fname.endswith(".jpeg")] for name, label in data: yield { "name": name, "image": _resize_image_if_necessary( tf.io.gfile.GFile("%s/%s.jpeg" % (images_dir_path, name), mode="rb"), target_pixels=self.builder_config.target_pixels), "label": label, }
<SYSTEM_TASK:> Return the list of files and reading mask of the files to read. <END_TASK> <USER_TASK:> Description: def _slice_split_info_to_instruction_dicts(self, list_sliced_split_info): """Return the list of files and reading mask of the files to read."""
instruction_dicts = [] for sliced_split_info in list_sliced_split_info: mask = splits_lib.slice_to_percent_mask(sliced_split_info.slice_value) # Compute filenames from the given split filepaths = list(sorted(self._build_split_filenames( split_info_list=[sliced_split_info.split_info], ))) # Compute the offsets if sliced_split_info.split_info.num_examples: shard_id2num_examples = splits_lib.get_shard_id2num_examples( sliced_split_info.split_info.num_shards, sliced_split_info.split_info.num_examples, ) mask_offsets = splits_lib.compute_mask_offsets(shard_id2num_examples) else: logging.warning( "Statistics not present in the dataset. TFDS is not able to load " "the total number of examples, so using the subsplit API may not " "provide precise subsplits." ) mask_offsets = [0] * len(filepaths) for filepath, mask_offset in zip(filepaths, mask_offsets): instruction_dicts.append({ "filepath": filepath, "mask": mask, "mask_offset": mask_offset, }) return instruction_dicts
<SYSTEM_TASK:> Construct the split filenames associated with the split info. <END_TASK> <USER_TASK:> Description: def _build_split_filenames(self, split_info_list): """Construct the split filenames associated with the split info. The filenames correspond to the pre-processed datasets files present in the root directory of the dataset. Args: split_info_list: (list[SplitInfo]) List of split from which generate the filenames Returns: filenames: (list[str]) The list of filenames path corresponding to the split info object """
filenames = [] for split_info in split_info_list: filenames.extend(naming.filepaths_for_dataset_split( dataset_name=self.name, split=split_info.name, num_shards=split_info.num_shards, data_dir=self._data_dir, filetype_suffix=self._file_format_adapter.filetype_suffix, )) return filenames
<SYSTEM_TASK:> Generate MovingMnist sequences. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, data_path): """Generate MovingMnist sequences. Args: data_path (str): Path to the data file Yields: 20 x 64 x 64 x 1 uint8 numpy arrays """
with tf.io.gfile.GFile(data_path, "rb") as fp: images = np.load(fp) images = np.transpose(images, (1, 0, 2, 3)) images = np.expand_dims(images, axis=-1) for sequence in images: yield dict(image_sequence=sequence)
<SYSTEM_TASK:> Parses single video from the input tfrecords. <END_TASK> <USER_TASK:> Description: def _parse_single_video(self, example_proto): """Parses single video from the input tfrecords. Args: example_proto: tfExample proto with a single video. Returns: dict with all frames, positions and actions. """
context_features = { "game_duration_loops": tf.io.FixedLenFeature([1], tf.int64), "game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32), "n_steps": tf.io.FixedLenFeature([1], tf.int64), "screen_size": tf.io.FixedLenFeature([2], tf.int64), } sequence_features = { "rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string), } _, seq_feat = tf.io.parse_single_sequence_example( example_proto, context_features=context_features, sequence_features=sequence_features) video_frames = tf.map_fn( tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8) return video_frames
<SYSTEM_TASK:> Generates examples for the dSprites data set. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, filepath): """Generates examples for the dSprites data set. Args: filepath: path to the dSprites hdf5 file. Yields: Dictionaries with images, latent classes, and latent values. """
# Simultaneously iterating through the different data sets in the hdf5 # file is >100x slower and the data set is small (26.7MB). Hence, we first # load everything into memory before yielding the samples. image_array, class_array, values_array = _load_data(filepath) for image, classes, values in moves.zip(image_array, class_array, values_array): yield dict( image=np.expand_dims(image, -1), label_shape=classes[1], label_scale=classes[2], label_orientation=classes[3], label_x_position=classes[4], label_y_position=classes[5], value_shape=values[1], value_scale=values[2], value_orientation=values[3], value_x_position=values[4], value_y_position=values[5])
<SYSTEM_TASK:> Returns bounded boxes listed within given CSV file. <END_TASK> <USER_TASK:> Description: def _load_bboxes(csv_path, csv_positions, prefix): """Returns bounded boxes listed within given CSV file."""
logging.info('Loading CSVs %s from positions %s with prefix %s', csv_path, csv_positions, prefix) boxes = collections.defaultdict(list) with tf.io.gfile.GFile(csv_path) as csv_f: if csv_positions[0] > 0: csv_f.seek(csv_positions[0]) else: csv_f.readline() # Drop headers reader = csv.reader(csv_f) for (image_id, source, label, confidence, xmin, xmax, ymin, ymax, is_occluded, is_truncated, is_group_of, is_depiction, is_inside, ) in reader: if prefix and image_id[0] != prefix: break csv_positions[0] = csv_f.tell() image_id = int(image_id, 16) del confidence # always 1 in bounding boxes. current_row = _Bbox( label, source, tfds.features.BBox( float(ymin), float(xmin), float(ymax), float(xmax)), int(is_occluded), int(is_truncated), int(is_group_of), int(is_depiction), int(is_inside)) boxes[image_id].append(current_row) return dict(boxes)
<SYSTEM_TASK:> Find files corresponding to urls. <END_TASK> <USER_TASK:> Description: def _find_files(dl_paths, publisher, url_dict): """Find files corresponding to urls."""
if publisher == 'cnn': top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories') elif publisher == 'dm': top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories') else: logging.fatal('Unsupported publisher: %s', publisher) files = tf.io.gfile.listdir(top_dir) ret_files = [] for p in files: basename = os.path.basename(p) if basename[0:basename.find('.story')] in url_dict: ret_files.append(os.path.join(top_dir, p)) return ret_files
<SYSTEM_TASK:> Get filenames for a particular split. <END_TASK> <USER_TASK:> Description: def _subset_filenames(dl_paths, split): """Get filenames for a particular split."""
assert isinstance(dl_paths, dict), dl_paths # Get filenames for a split. if split == tfds.Split.TRAIN: urls = _get_url_hashes(dl_paths['train_urls']) elif split == tfds.Split.VALIDATION: urls = _get_url_hashes(dl_paths['val_urls']) elif split == tfds.Split.TEST: urls = _get_url_hashes(dl_paths['test_urls']) else: logging.fatal('Unsupported split: %s', split) cnn = _find_files(dl_paths, 'cnn', urls) dm = _find_files(dl_paths, 'dm', urls) return cnn + dm
<SYSTEM_TASK:> Handle the requests and return the response body. <END_TASK> <USER_TASK:> Description: def requester( url, main_url=None, delay=0, cook=None, headers=None, timeout=10, host=None, proxies=[None], user_agents=[None], failed=None, processed=None ): """Handle the requests and return the response body."""
cook = cook or set() headers = headers or set() user_agents = user_agents or ['Photon'] failed = failed or set() processed = processed or set() # Mark the URL as crawled processed.add(url) # Pause/sleep the program for specified time time.sleep(delay) def make_request(url): """Default request""" final_headers = headers or { 'Host': host, # Selecting a random user-agent 'User-Agent': random.choice(user_agents), 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip', 'DNT': '1', 'Connection': 'close', } try: response = SESSION.get( url, cookies=cook, headers=final_headers, verify=False, timeout=timeout, stream=True, proxies=random.choice(proxies) ) except TooManyRedirects: return 'dummy' if 'text/html' in response.headers['content-type'] or \ 'text/plain' in response.headers['content-type']: if response.status_code != 404: return response.text else: response.close() failed.add(url) return 'dummy' else: response.close() return 'dummy' return make_request(url)
<SYSTEM_TASK:> Extract intel from the response body. <END_TASK> <USER_TASK:> Description: def intel_extractor(url, response): """Extract intel from the response body."""
for rintel in rintels: res = re.sub(r'(?s)<(script).*?</\1>', '', response) res = re.sub(r'<[^<]+?>', '', res) matches = rintel[0].findall(res) if matches: for match in matches: verb('Intel', match) bad_intel.add((match, rintel[1], url))
<SYSTEM_TASK:> Extract js files from the response body <END_TASK> <USER_TASK:> Description: def js_extractor(response): """Extract js files from the response body"""
# Extract .js files matches = rscript.findall(response) for match in matches: match = match[2].replace('\'', '').replace('"', '') verb('JS file', match) bad_scripts.add(match)
<SYSTEM_TASK:> Extract details from the response body. <END_TASK> <USER_TASK:> Description: def extractor(url): """Extract details from the response body."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed) if clone: mirror(url, response) matches = rhref.findall(response) for link in matches: # Remove everything after a "#" to deal with in-page anchors link = link[1].replace('\'', '').replace('"', '').split('#')[0] # Checks if the URLs should be crawled if is_link(link, processed, files): if link[:4] == 'http': if link.startswith(main_url): verb('Internal page', link) internal.add(link) else: verb('External page', link) external.add(link) elif link[:2] == '//': if link.split('/')[2].startswith(host): verb('Internal page', link) internal.add(schema + '://' + link) else: verb('External page', link) external.add(link) elif link[:1] == '/': verb('Internal page', link) internal.add(remove_file(url) + link) else: verb('Internal page', link) usable_url = remove_file(url) if usable_url.endswith('/'): internal.add(usable_url + link) elif link.startswith('/'): internal.add(usable_url + link) else: internal.add(usable_url + '/' + link) if not only_urls: intel_extractor(url, response) js_extractor(response) if args.regex and not supress_regex: regxy(args.regex, response, supress_regex, custom) if api: matches = rentropy.findall(response) for match in matches: if entropy(match) >= 4: verb('Key', match) keys.add(url + ': ' + match)
<SYSTEM_TASK:> Extract endpoints from JavaScript code. <END_TASK> <USER_TASK:> Description: def jscanner(url): """Extract endpoints from JavaScript code."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed) # Extract URLs/endpoints matches = rendpoint.findall(response) # Iterate over the matches, match is a tuple for match in matches: # Combining the items because one of them is always empty match = match[0] + match[1] # Making sure it's not some JavaScript code if not re.search(r'[}{><"\']', match) and not match == '/': verb('JS endpoint', match) endpoints.add(match)
<SYSTEM_TASK:> Update the current installation. <END_TASK> <USER_TASK:> Description: def updater(): """Update the current installation. git clones the latest version and merges it with the current directory. """
print('%s Checking for updates' % run) # Changes must be separated by ; changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels''' latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com') # Just a hack to see if a new version is available if changes not in latest_commit: changelog = re.search(r"changes = '''(.*?)'''", latest_commit) # Splitting the changes to form a list changelog = changelog.group(1).split(';') print('%s A new version of Photon is available.' % good) print('%s Changes:' % info) for change in changelog: # print changes print('%s>%s %s' % (green, end, change)) current_path = os.getcwd().split('/') # if you know it, you know it folder = current_path[-1] # current directory name path = '/'.join(current_path) # current directory path choice = input('%s Would you like to update? [Y/n] ' % que).lower() if choice != 'n': print('%s Updating Photon' % run) os.system('git clone --quiet https://github.com/s0md3v/Photon %s' % (folder)) os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null' % (path, folder, path, path, folder)) print('%s Update successful!' % good) else: print('%s Photon is up to date!' % good)
<SYSTEM_TASK:> Find subdomains according to the TLD. <END_TASK> <USER_TASK:> Description: def find_subdomains(domain): """Find subdomains according to the TLD."""
result = set() response = get('https://findsubdomains.com/subdomains-of/' + domain).text matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response) for match in matches: result.add(match.replace(' ', '').replace('\n', '')) return list(result)
<SYSTEM_TASK:> Process the URLs using a thread pool to execute a function. <END_TASK> <USER_TASK:> Description: def flash(function, links, thread_count): """Process the URLs using a thread pool to execute a function."""
# Convert links (set) to list links = list(links) threadpool = concurrent.futures.ThreadPoolExecutor( max_workers=thread_count) futures = (threadpool.submit(function, link) for link in links) for i, _ in enumerate(concurrent.futures.as_completed(futures)): if i + 1 == len(links) or (i + 1) % thread_count == 0: print('%s Progress: %i/%i' % (info, i + 1, len(links)), end='\r') print('')
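A standalone sketch of the same thread-pool pattern, with a dummy worker standing in for the real per-URL function:

import concurrent.futures

def worker(link):
    return len(link)  # stand-in for a real per-URL function

links = ['https://example.com/%d' % i for i in range(10)]
thread_count = 4
with concurrent.futures.ThreadPoolExecutor(max_workers=thread_count) as pool:
    futures = [pool.submit(worker, link) for link in links]
    for i, _ in enumerate(concurrent.futures.as_completed(futures)):
        if i + 1 == len(links) or (i + 1) % thread_count == 0:
            print('Progress: %i/%i' % (i + 1, len(links)), end='\r')
print()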
<SYSTEM_TASK:> Extract a string based on a regex pattern supplied by the user. <END_TASK> <USER_TASK:> Description: def regxy(pattern, response, supress_regex, custom): """Extract a string based on a regex pattern supplied by the user."""
try: matches = re.findall(r'%s' % pattern, response) for match in matches: verb('Custom regex', match) custom.add(match) except: supress_regex = True
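For example, with a hypothetical e-mail pattern of the kind a user might pass via --regex:

import re

response = 'Contact us at admin@example.com or sales@example.com for details.'
custom = set()
pattern = r'[\w.+-]+@[\w-]+\.[\w.-]+'   # hypothetical user-supplied pattern
for match in re.findall(r'%s' % pattern, response):
    custom.add(match)
print(custom)   # {'admin@example.com', 'sales@example.com'}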
<SYSTEM_TASK:> Determine whether or not a link should be crawled <END_TASK> <USER_TASK:> Description: def is_link(url, processed, files): """ Determine whether or not a link should be crawled. A url should not be crawled if it - Is a file - Has already been crawled Args: url: str Url to be processed processed: list[str] List of urls that have already been crawled files: set[str] Set that collects urls pointing to files Returns: bool True if `url` should be crawled """
if url not in processed: is_file = url.endswith(BAD_TYPES) if is_file: files.add(url) return False return True return False
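A self-contained rehearsal of the same decision logic; the BAD_TYPES tuple shown is only an illustrative subset, the real one lives in Photon's config:

BAD_TYPES = ('.jpg', '.png', '.css', '.pdf')   # illustrative subset only

def should_crawl(url, processed, files):
    if url not in processed:
        if url.endswith(BAD_TYPES):
            files.add(url)
            return False
        return True
    return False

processed, files = {'https://example.com/seen'}, set()
print(should_crawl('https://example.com/new', processed, files))       # True
print(should_crawl('https://example.com/logo.png', processed, files))  # False, recorded in files
print(should_crawl('https://example.com/seen', processed, files))      # False, already crawled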
<SYSTEM_TASK:> Parse a list for non-matches to a regex. <END_TASK> <USER_TASK:> Description: def remove_regex(urls, regex): """ Parse a list for non-matches to a regex. Args: urls: iterable of urls regex: string regex to be parsed for Returns: list of strings not matching regex """
if not regex: return urls # To avoid iterating over the characters of a string if not isinstance(urls, (list, set, tuple)): urls = [urls] try: non_matching_urls = [url for url in urls if not re.search(regex, url)] except TypeError: return [] return non_matching_urls
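Assuming the function above and the re module are in scope, a quick usage check:

urls = ['https://example.com/a.png', 'https://example.com/page', 'https://example.com/b.jpg']
print(remove_regex(urls, r'\.(png|jpg)$'))   # ['https://example.com/page']
print(remove_regex(urls, None))              # returned unchanged when no regex is given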
<SYSTEM_TASK:> Extract the top-level domain from a URL. <END_TASK> <USER_TASK:> Description: def top_level(url, fix_protocol=True): """Extract the top-level domain from a URL."""
ext = tld.get_tld(url, fix_protocol=fix_protocol) toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split( ext)[0] + ext return toplevel
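The helper leans on the third-party tld package for suffix handling; a standard-library-only approximation (which gets multi-part suffixes such as .co.uk wrong) looks like this:

from urllib.parse import urlparse

def naive_top_level(url):
    # Keep only the last two labels of the host; incorrect for suffixes like .co.uk
    netloc = urlparse(url).netloc
    return '.'.join(netloc.split('.')[-2:])

print(naive_top_level('https://blog.example.com/post'))   # example.com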
<SYSTEM_TASK:> Present the user with a prompt. <END_TASK> <USER_TASK:> Description: def prompt(default=None): """Present the user with a prompt."""
editor = 'nano' with tempfile.NamedTemporaryFile(mode='r+') as tmpfile: if default: tmpfile.write(default) tmpfile.flush() child_pid = os.fork() is_child = child_pid == 0 if is_child: os.execvp(editor, [editor, tmpfile.name]) else: os.waitpid(child_pid, 0) tmpfile.seek(0) return tmpfile.read().strip()
<SYSTEM_TASK:> Generator-driven data flow. <END_TASK> <USER_TASK:> Description: def run(self): """Generator-driven data flow."""
# A settlement event is only triggered when the date changes _date = None while QA_util_if_tradetime(self.now): for data in self.ingest_data: # for each piece of data in ingest_data # <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'> date = data.date[0] if self.market_type is MARKET_TYPE.STOCK_CN: # stock market if _date != date: # a new date appeared # the previous trading day is over # send a settle event to the broker and the account try: self.market.trade_engine.join() # time.sleep(2) self.market._settle(self.broker_name) except Exception as e: raise e # funds, indices, futures elif self.market_type in [MARKET_TYPE.FUND_CN, MARKET_TYPE.INDEX_CN, MARKET_TYPE.FUTURE_CN]: self.market._settle(self.broker_name) # print(data) self.broker.run( QA_Event(event_type=ENGINE_EVENT.UPCOMING_DATA, market_data=data)) # generate an UPCOMING_DATA event and put it on the queue for execution self.market.upcoming_data(self.broker_name, data) self.market.trade_engine.join() _date = date
<SYSTEM_TASK:> plot the market_data <END_TASK> <USER_TASK:> Description: def kline_echarts(self, code=None): """plot the market_data""" def kline_formater(param): return param.name + ':' + vars(param)
if code is None: path_name = '.' + os.sep + 'QA_' + self.type + \ '_codepackage_' + self.if_fq + '.html' kline = Kline( 'CodePackage_' + self.if_fq + '_' + self.type, width=1360, height=700, page_title='QUANTAXIS' ) bar = Bar() data_splits = self.splits() for ds in data_splits: data = [] axis = [] if ds.type[-3:] == 'day': datetime = np.array(ds.date.map(str)) else: datetime = np.array(ds.datetime.map(str)) ohlc = np.array( ds.data.loc[:, ['open', 'close', 'low', 'high']] ) kline.add( ds.code[0], datetime, ohlc, mark_point=["max", "min"], is_datazoom_show=True, datazoom_orient='horizontal' ) return kline else: data = [] axis = [] ds = self.select_code(code) data = [] #axis = [] if self.type[-3:] == 'day': datetime = np.array(ds.date.map(str)) else: datetime = np.array(ds.datetime.map(str)) ohlc = np.array(ds.data.loc[:, ['open', 'close', 'low', 'high']]) vol = np.array(ds.volume) kline = Kline( '{}__{}__{}'.format(code, self.if_fq, self.type), width=1360, height=700, page_title='QUANTAXIS' ) bar = Bar() kline.add(self.code, datetime, ohlc, mark_point=["max", "min"], # is_label_show=True, is_datazoom_show=True, is_xaxis_show=False, # is_toolbox_show=True, tooltip_formatter='{b}:{c}', # kline_formater, # is_more_utils=True, datazoom_orient='horizontal') bar.add( self.code, datetime, vol, is_datazoom_show=True, datazoom_xaxis_index=[0, 1] ) grid = Grid(width=1360, height=700, page_title='QUANTAXIS') grid.add(bar, grid_top="80%") grid.add(kline, grid_bottom="30%") return grid
<SYSTEM_TASK:> Create the tables needed to store the information. <END_TASK> <USER_TASK:> Description: def _create(self, cache_file): """Create the tables needed to store the information."""
conn = sqlite3.connect(cache_file) cur = conn.cursor() cur.execute("PRAGMA foreign_keys = ON") cur.execute(''' CREATE TABLE jobs( hash TEXT NOT NULL UNIQUE PRIMARY KEY, description TEXT NOT NULL, last_run REAL, next_run REAL, last_run_result INTEGER)''') cur.execute(''' CREATE TABLE history( hash TEXT, description TEXT, time REAL, result INTEGER, FOREIGN KEY(hash) REFERENCES jobs(hash))''') conn.commit() conn.close()
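The same schema exercised against a throwaway in-memory database:

import sqlite3
import time

conn = sqlite3.connect(':memory:')   # throwaway database for the example
cur = conn.cursor()
cur.execute('PRAGMA foreign_keys = ON')
cur.execute('CREATE TABLE jobs(hash TEXT NOT NULL UNIQUE PRIMARY KEY, description TEXT NOT NULL, '
            'last_run REAL, next_run REAL, last_run_result INTEGER)')
cur.execute('CREATE TABLE history(hash TEXT, description TEXT, time REAL, result INTEGER, '
            'FOREIGN KEY(hash) REFERENCES jobs(hash))')
cur.execute('INSERT INTO jobs VALUES (?, ?, ?, ?, ?)',
            ('abc123', 'nightly backup', None, time.time(), None))
conn.commit()
print(cur.execute('SELECT hash, description FROM jobs').fetchall())
conn.close()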
<SYSTEM_TASK:> Return the bot's response based on the input. <END_TASK> <USER_TASK:> Description: def get_response(self, statement=None, **kwargs): """ Return the bot's response based on the input. :param statement: A statement object or string. :returns: A response to the input. :rtype: Statement :param additional_response_selection_parameters: Parameters to pass to the chat bot's logic adapters to control response selection. :type additional_response_selection_parameters: dict :param persist_values_to_response: Values that should be saved to the response that the chat bot generates. :type persist_values_to_response: dict """
Statement = self.storage.get_object('statement') additional_response_selection_parameters = kwargs.pop('additional_response_selection_parameters', {}) persist_values_to_response = kwargs.pop('persist_values_to_response', {}) if isinstance(statement, str): kwargs['text'] = statement if isinstance(statement, dict): kwargs.update(statement) if statement is None and 'text' not in kwargs: raise self.ChatBotException( 'Either a statement object or a "text" keyword ' 'argument is required. Neither was provided.' ) if hasattr(statement, 'serialize'): kwargs.update(**statement.serialize()) tags = kwargs.pop('tags', []) text = kwargs.pop('text') input_statement = Statement(text=text, **kwargs) input_statement.add_tags(*tags) # Preprocess the input statement for preprocessor in self.preprocessors: input_statement = preprocessor(input_statement) # Make sure the input statement has its search text saved if not input_statement.search_text: input_statement.search_text = self.storage.tagger.get_bigram_pair_string(input_statement.text) if not input_statement.search_in_response_to and input_statement.in_response_to: input_statement.search_in_response_to = self.storage.tagger.get_bigram_pair_string(input_statement.in_response_to) response = self.generate_response(input_statement, additional_response_selection_parameters) # Update any response data that needs to be changed if persist_values_to_response: for response_key in persist_values_to_response: response_value = persist_values_to_response[response_key] if response_key == 'tags': input_statement.add_tags(*response_value) response.add_tags(*response_value) else: setattr(input_statement, response_key, response_value) setattr(response, response_key, response_value) if not self.read_only: self.learn_response(input_statement) # Save the response generated for the input self.storage.create(**response.serialize()) return response
<SYSTEM_TASK:> Return a response based on a given input statement. <END_TASK> <USER_TASK:> Description: def generate_response(self, input_statement, additional_response_selection_parameters=None): """ Return a response based on a given input statement. :param input_statement: The input statement to be processed. """
Statement = self.storage.get_object('statement') results = [] result = None max_confidence = -1 for adapter in self.logic_adapters: if adapter.can_process(input_statement): output = adapter.process(input_statement, additional_response_selection_parameters) results.append(output) self.logger.info( '{} selected "{}" as a response with a confidence of {}'.format( adapter.class_name, output.text, output.confidence ) ) if output.confidence > max_confidence: result = output max_confidence = output.confidence else: self.logger.info( 'Not processing the statement using {}'.format(adapter.class_name) ) class ResultOption: def __init__(self, statement, count=1): self.statement = statement self.count = count # If multiple adapters agree on the same statement, # then that statement is more likely to be the correct response if len(results) >= 3: result_options = {} for result_option in results: result_string = result_option.text + ':' + (result_option.in_response_to or '') if result_string in result_options: result_options[result_string].count += 1 if result_options[result_string].statement.confidence < result_option.confidence: result_options[result_string].statement = result_option else: result_options[result_string] = ResultOption( result_option ) most_common = list(result_options.values())[0] for result_option in result_options.values(): if result_option.count > most_common.count: most_common = result_option if most_common.count > 1: result = most_common.statement response = Statement( text=result.text, in_response_to=input_statement.text, conversation=input_statement.conversation, persona='bot:' + self.name ) response.confidence = result.confidence return response
<SYSTEM_TASK:> Learn that the statement provided is a valid response. <END_TASK> <USER_TASK:> Description: def learn_response(self, statement, previous_statement=None): """ Learn that the statement provided is a valid response. """
if not previous_statement: previous_statement = statement.in_response_to if not previous_statement: previous_statement = self.get_latest_response(statement.conversation) if previous_statement: previous_statement = previous_statement.text previous_statement_text = previous_statement if not isinstance(previous_statement, (str, type(None), )): statement.in_response_to = previous_statement.text elif isinstance(previous_statement, str): statement.in_response_to = previous_statement self.logger.info('Adding "{}" as a response to "{}"'.format( statement.text, previous_statement_text )) # Save the input statement return self.storage.create(**statement.serialize())
<SYSTEM_TASK:> Imports the specified module based on the dot notated import path for the module. <END_TASK> <USER_TASK:> Description: def import_module(dotted_path): """ Imports the specified module based on the dot notated import path for the module. """
import importlib module_parts = dotted_path.split('.') module_path = '.'.join(module_parts[:-1]) module = importlib.import_module(module_path) return getattr(module, module_parts[-1])
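With the helper above in scope, resolving a class by its dotted path looks like this:

OrderedDict = import_module('collections.OrderedDict')
print(OrderedDict([('a', 1), ('b', 2)]))   # OrderedDict([('a', 1), ('b', 2)])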
<SYSTEM_TASK:> Raises an exception if validate_class is not a subclass of adapter_class. <END_TASK> <USER_TASK:> Description: def validate_adapter_class(validate_class, adapter_class): """ Raises an exception if validate_class is not a subclass of adapter_class. :param validate_class: The class to be validated. :type validate_class: class :param adapter_class: The class type to check against. :type adapter_class: class :raises: Adapter.InvalidAdapterTypeException """
from chatterbot.adapters import Adapter # If a dictionary was passed in, check if it has an import_path attribute if isinstance(validate_class, dict): if 'import_path' not in validate_class: raise Adapter.InvalidAdapterTypeException( 'The dictionary {} must contain a value for "import_path"'.format( str(validate_class) ) ) # Set the class to the import path for the next check validate_class = validate_class.get('import_path') if not issubclass(import_module(validate_class), adapter_class): raise Adapter.InvalidAdapterTypeException( '{} must be a subclass of {}'.format( validate_class, adapter_class.__name__ ) )
<SYSTEM_TASK:> Returns the amount of time taken for a given chat bot to return a response. <END_TASK> <USER_TASK:> Description: def get_response_time(chatbot, statement='Hello'): """ Returns the amount of time taken for a given chat bot to return a response. :param chatbot: A chat bot instance. :type chatbot: ChatBot :returns: The response time in seconds. :rtype: float """
import time start_time = time.time() chatbot.get_response(statement) return time.time() - start_time
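The same wall-clock measurement pattern, generalised to any callable so it runs without a configured chat bot:

import time

def time_call(func, *args, **kwargs):
    start_time = time.time()
    func(*args, **kwargs)
    return time.time() - start_time

print('%.4f seconds' % time_call(sorted, list(range(100000))))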
<SYSTEM_TASK:> Returns the first matching `pint.unit.Unit` object for the from_unit and target_unit strings. <END_TASK> <USER_TASK:> Description: def get_valid_units(self, ureg, from_unit, target_unit): """ Returns the first matching `pint.unit.Unit` object for the from_unit and target_unit strings from a possible variation of metric unit names supported by the pint library. :param ureg: unit registry in which units are defined and handled :type ureg: `pint.registry.UnitRegistry` :param from_unit: source metric unit :type from_unit: str :param target_unit: target metric unit :type target_unit: str """
from_unit_variations = [from_unit.lower(), from_unit.upper()] target_unit_variations = [target_unit.lower(), target_unit.upper()] from_unit = self.get_unit(ureg, from_unit_variations) target_unit = self.get_unit(ureg, target_unit_variations) return from_unit, target_unit
<SYSTEM_TASK:> Returns a response statement from a matched input statement. <END_TASK> <USER_TASK:> Description: def handle_matches(self, match): """ Returns a response statement from a matched input statement. :param match: It is a valid matched pattern from the input statement :type: `_sre.SRE_Match` """
response = Statement(text='') from_parsed = match.group("from") target_parsed = match.group("target") n_statement = match.group("number") if n_statement == 'a' or n_statement == 'an': n_statement = '1.0' n = mathparse.parse(n_statement, self.language.ISO_639.upper()) ureg = UnitRegistry() from_parsed, target_parsed = self.get_valid_units(ureg, from_parsed, target_parsed) if from_parsed is None or target_parsed is None: response.confidence = 0.0 else: from_value = ureg.Quantity(float(n), from_parsed) target_value = from_value.to(target_parsed) response.confidence = 1.0 response.text = str(target_value.magnitude) return response
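The conversion itself reduces to a few calls into the third-party pint package; a minimal sketch:

from pint import UnitRegistry

ureg = UnitRegistry()
quantity = ureg.Quantity(2.5, ureg.kilometer)   # e.g. "2.5 kilometers"
converted = quantity.to(ureg.meter)
print(converted.magnitude)   # 2500.0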
<SYSTEM_TASK:> This method is called when a logic adapter is unable to generate any other meaningful response. <END_TASK> <USER_TASK:> Description: def get_default_response(self, input_statement): """ This method is called when a logic adapter is unable to generate any other meaningful response. """
from random import choice if self.default_responses: response = choice(self.default_responses) else: try: response = self.chatbot.storage.get_random() except StorageAdapter.EmptyDatabaseException: response = input_statement self.chatbot.logger.info( 'No known response to the input was found. Selecting a random response.' ) # Set confidence to zero because a random response is selected response.confidence = 0 return response
<SYSTEM_TASK:> Provide an analysis of significant features in the string. <END_TASK> <USER_TASK:> Description: def time_question_features(self, text): """ Provide an analysis of significant features in the string. """
features = {} # A list of all words from the known sentences all_words = " ".join(self.positive + self.negative).split() # A list of the first word in each of the known sentences all_first_words = [] for sentence in self.positive + self.negative: all_first_words.append( sentence.split(' ', 1)[0] ) for word in text.split(): features['first_word({})'.format(word)] = (word in all_first_words) for word in text.split(): features['contains({})'.format(word)] = (word in all_words) for letter in 'abcdefghijklmnopqrstuvwxyz': features['count({})'.format(letter)] = text.lower().count(letter) features['has({})'.format(letter)] = (letter in text.lower()) return features
<SYSTEM_TASK:> Determines whether it is appropriate for this adapter to respond to the user input. <END_TASK> <USER_TASK:> Description: def can_process(self, statement): """ Determines whether it is appropriate for this adapter to respond to the user input. """
response = self.process(statement) self.cache[statement.text] = response return response.confidence == 1
<SYSTEM_TASK:> Takes a statement string. <END_TASK> <USER_TASK:> Description: def process(self, statement, additional_response_selection_parameters=None): """ Takes a statement string. Returns the equation from the statement with the mathematical terms solved. """
from mathparse import mathparse input_text = statement.text # Use the result cached by the process method if it exists if input_text in self.cache: cached_result = self.cache[input_text] self.cache = {} return cached_result # Getting the mathematical terms within the input statement expression = mathparse.extract_expression(input_text, language=self.language.ISO_639.upper()) response = Statement(text=expression) try: response.text += ' = ' + str( mathparse.parse(expression, language=self.language.ISO_639.upper()) ) # The confidence is 1 if the expression could be evaluated response.confidence = 1 except mathparse.PostfixTokenEvaluationException: response.confidence = 0 return response
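The heavy lifting is done by the third-party mathparse package; a quick sketch of the two calls used above (the extracted expression printed first is indicative, not guaranteed output):

from mathparse import mathparse

expression = mathparse.extract_expression('What is 50 * (85 / 100)?', language='ENG')
print(expression)                                            # the mathematical portion of the sentence
print(mathparse.parse('50 * (85 / 100)', language='ENG'))    # 42.5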
<SYSTEM_TASK:> A filter that eliminates possibly repetitive responses to prevent a chat bot from repeating statements that it has recently said. <END_TASK> <USER_TASK:> Description: def get_recent_repeated_responses(chatbot, conversation, sample=10, threshold=3, quantity=3): """ A filter that eliminates possibly repetitive responses to prevent a chat bot from repeating statements that it has recently said. """
from collections import Counter # Get the most recent statements from the conversation conversation_statements = list(chatbot.storage.filter( conversation=conversation, order_by=['id'] ))[sample * -1:] text_of_recent_responses = [ statement.text for statement in conversation_statements ] counter = Counter(text_of_recent_responses) # Find the n most common responses from the conversation most_common = counter.most_common(quantity) return [ counted[0] for counted in most_common if counted[1] >= threshold ]
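The counting step on its own, with an in-memory list standing in for the statements pulled from storage:

from collections import Counter

recent_texts = ['Hi there', 'How are you?', 'Hi there', 'Hi there', 'Goodbye']
threshold, quantity = 3, 3
counter = Counter(recent_texts)
repeated = [text for text, count in counter.most_common(quantity) if count >= threshold]
print(repeated)   # ['Hi there']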
<SYSTEM_TASK:> Return the calculated similarity of two statements based on the Jaccard index. <END_TASK> <USER_TASK:> Description: def compare(self, statement_a, statement_b): """ Return the calculated similarity of two statements based on the Jaccard index. """
# Make both strings lowercase document_a = self.nlp(statement_a.text.lower()) document_b = self.nlp(statement_b.text.lower()) statement_a_lemmas = set([ token.lemma_ for token in document_a if not token.is_stop ]) statement_b_lemmas = set([ token.lemma_ for token in document_b if not token.is_stop ]) # Calculate Jaccard similarity numerator = len(statement_a_lemmas.intersection(statement_b_lemmas)) denominator = float(len(statement_a_lemmas.union(statement_b_lemmas))) ratio = numerator / denominator return ratio
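Stripped of the spaCy lemmatisation and stop-word filtering, the Jaccard index itself is just set arithmetic:

def jaccard(text_a, text_b):
    tokens_a = set(text_a.lower().split())
    tokens_b = set(text_b.lower().split())
    return len(tokens_a & tokens_b) / float(len(tokens_a | tokens_b))

print(jaccard('the cat sat on the mat', 'a cat sat on a mat'))   # 4 shared / 6 total = 0.666...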
<SYSTEM_TASK:> Return the class for the statement model. <END_TASK> <USER_TASK:> Description: def get_statement_model(self): """ Return the class for the statement model. """
from chatterbot.conversation import Statement # Create a storage-aware statement statement = Statement statement.storage = self return statement
<SYSTEM_TASK:> Return a Statement object when given data returned from Mongo DB. <END_TASK> <USER_TASK:> Description: def mongo_to_object(self, statement_data): """ Return a Statement object when given data returned from Mongo DB. """
Statement = self.get_model('statement') statement_data['id'] = statement_data['_id'] return Statement(**statement_data)
<SYSTEM_TASK:> Add a list of strings to the statement as tags. <END_TASK> <USER_TASK:> Description: def add_tags(self, *tags): """ Add a list of strings to the statement as tags. """
self.tags.extend([ Tag(name=tag) for tag in tags ])
<SYSTEM_TASK:> Create a file from the database that can be used to train other chat bots. <END_TASK> <USER_TASK:> Description: def export_for_training(self, file_path='./export.json'): """ Create a file from the database that can be used to train other chat bots. """
import json export = {'conversations': self._generate_export_data()} with open(file_path, 'w+') as jsonfile: json.dump(export, jsonfile, ensure_ascii=False)
<SYSTEM_TASK:> Train the chat bot based on the provided list of statements that represents a single conversation. <END_TASK> <USER_TASK:> Description: def train(self, conversation): """ Train the chat bot based on the provided list of statements that represents a single conversation. """
previous_statement_text = None previous_statement_search_text = '' statements_to_create = [] for conversation_count, text in enumerate(conversation): if self.show_training_progress: utils.print_progress_bar( 'List Trainer', conversation_count + 1, len(conversation) ) statement_search_text = self.chatbot.storage.tagger.get_bigram_pair_string(text) statement = self.get_preprocessed_statement( Statement( text=text, search_text=statement_search_text, in_response_to=previous_statement_text, search_in_response_to=previous_statement_search_text, conversation='training' ) ) previous_statement_text = statement.text previous_statement_search_text = statement_search_text statements_to_create.append(statement) self.chatbot.storage.create_many(statements_to_create)
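At its core the list trainer pairs each statement with the one before it; a stripped-down sketch without storage, taggers or progress output:

conversation = ['Hi', 'Hello, how can I help?', 'What time do you open?', 'We open at nine.']
pairs = []
previous_statement_text = None
for text in conversation:
    pairs.append({'text': text, 'in_response_to': previous_statement_text})
    previous_statement_text = text
for pair in pairs:
    print(pair)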
<SYSTEM_TASK:> Check if the data file is already downloaded. <END_TASK> <USER_TASK:> Description: def is_downloaded(self, file_path): """ Check if the data file is already downloaded. """
if os.path.exists(file_path): self.chatbot.logger.info('File is already downloaded') return True return False
<SYSTEM_TASK:> Check if the data file is already extracted. <END_TASK> <USER_TASK:> Description: def is_extracted(self, file_path): """ Check if the data file is already extracted. """
if os.path.isdir(file_path): self.chatbot.logger.info('File is already extracted') return True return False
<SYSTEM_TASK:> Extract a tar file at the specified file path. <END_TASK> <USER_TASK:> Description: def extract(self, file_path): """ Extract a tar file at the specified file path. """
import tarfile print('Extracting {}'.format(file_path)) if not os.path.exists(self.extracted_data_directory): os.makedirs(self.extracted_data_directory) def track_progress(members): sys.stdout.write('.') for member in members: # This will be the current file being extracted yield member with tarfile.open(file_path) as tar: tar.extractall(path=self.extracted_data_directory, members=track_progress(tar)) self.chatbot.logger.info('File extracted to {}'.format(self.extracted_data_directory)) return True
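A round trip in a temporary directory shows the same tarfile calls end to end:

import os
import tarfile
import tempfile

workdir = tempfile.mkdtemp()
payload = os.path.join(workdir, 'data.txt')
with open(payload, 'w') as f:
    f.write('hello')

archive = os.path.join(workdir, 'corpus.tar.gz')
with tarfile.open(archive, 'w:gz') as tar:
    tar.add(payload, arcname='data.txt')

extracted_dir = os.path.join(workdir, 'extracted')
os.makedirs(extracted_dir, exist_ok=True)
with tarfile.open(archive) as tar:
    tar.extractall(path=extracted_dir)
print(os.listdir(extracted_dir))   # ['data.txt']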