text_prompt: string (lengths 100 to 17.7k)
code_prompt: string (lengths 7 to 9.86k)
<SYSTEM_TASK:> Compute and cache a recarray of asset lifetimes. <END_TASK> <USER_TASK:> Description: def _compute_asset_lifetimes(self, country_codes): """ Compute and cache a recarray of asset lifetimes. """
equities_cols = self.equities.c
if country_codes:
    buf = np.array(
        tuple(
            sa.select((
                equities_cols.sid,
                equities_cols.start_date,
                equities_cols.end_date,
            )).where(
                (self.exchanges.c.exchange == equities_cols.exchange) &
                (self.exchanges.c.country_code.in_(country_codes))
            ).execute(),
        ),
        dtype='f8',  # use doubles so we get NaNs
    )
else:
    buf = np.array([], dtype='f8')

lifetimes = np.recarray(
    buf=buf,
    shape=(len(buf),),
    dtype=[
        ('sid', 'f8'),
        ('start', 'f8'),
        ('end', 'f8')
    ],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0  # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max  # convert missing end to INTMAX

# Cast the results back down to int.
return lifetimes.astype([
    ('sid', 'i8'),
    ('start', 'i8'),
    ('end', 'i8'),
])
<SYSTEM_TASK:> Compute a DataFrame representing asset lifetimes for the specified date <END_TASK> <USER_TASK:> Description: def lifetimes(self, dates, include_start_date, country_codes): """ Compute a DataFrame representing asset lifetimes for the specified date range. Parameters ---------- dates : pd.DatetimeIndex The dates for which to compute lifetimes. include_start_date : bool Whether or not to count the asset as alive on its start_date. This is useful in a backtesting context where `lifetimes` is being used to signify "do I have data for this asset as of the morning of this date?" For many financial metrics, (e.g. daily close), data isn't available for an asset until the end of the asset's first day. country_codes : iterable[str] The country codes to get lifetimes for. Returns ------- lifetimes : pd.DataFrame A frame of dtype bool with `dates` as index and an Int64Index of assets as columns. The value at `lifetimes.loc[date, asset]` will be True iff `asset` existed on `date`. If `include_start_date` is False, then lifetimes.loc[date, asset] will be false when date == asset.start_date. See Also -------- numpy.putmask zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask """
if isinstance(country_codes, string_types):
    raise TypeError(
        "Got string {!r} instead of an iterable of strings in "
        "AssetFinder.lifetimes.".format(country_codes),
    )

# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)

lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
    self._asset_lifetimes[country_codes] = lifetimes = (
        self._compute_asset_lifetimes(country_codes)
    )

raw_dates = as_column(dates.asi8)
if include_start_date:
    mask = lifetimes.start <= raw_dates
else:
    mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)

return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
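For intuition, here is a minimal self-contained sketch of the masking step above using synthetic lifetimes data; the sids, start/end dates, and date range are made up for illustration and stand in for the recarray returned by `_compute_asset_lifetimes`.

```python
import numpy as np
import pandas as pd

# Hypothetical lifetimes: sid 1 lives 2020-01-02..2020-01-05, sid 2 lives 2020-01-04..2020-01-10.
sids = np.array([1, 2])
start = pd.to_datetime(["2020-01-02", "2020-01-04"]).asi8
end = pd.to_datetime(["2020-01-05", "2020-01-10"]).asi8

dates = pd.date_range("2020-01-01", "2020-01-06")
raw_dates = dates.asi8[:, None]          # as_column: shape (n_dates, 1) for broadcasting

include_start_date = True
mask = (start <= raw_dates) if include_start_date else (start < raw_dates)
mask &= raw_dates <= end                 # alive iff start <= date <= end

frame = pd.DataFrame(mask, index=dates, columns=sids)
print(frame)                             # True where the asset existed on that date
```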
<SYSTEM_TASK:> Return all of the sids for a given country. <END_TASK> <USER_TASK:> Description: def equities_sids_for_country_code(self, country_code): """Return all of the sids for a given country. Parameters ---------- country_code : str An ISO 3166 alpha-2 country code. Returns ------- tuple[int] The sids whose exchanges are in this country. """
sids = self._compute_asset_lifetimes([country_code]).sid
return tuple(sids.tolist())
<SYSTEM_TASK:> Get the latest minute on or before ``dt`` in which ``asset`` traded. <END_TASK> <USER_TASK:> Description: def get_last_traded_dt(self, asset, dt): """ Get the latest minute on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded minute. dt : pd.Timestamp The minute at which to start searching for the last traded minute. Returns ------- last_traded : pd.Timestamp The dt of the last trade for the given asset, using the input dt as a vantage point. """
rf = self._roll_finders[asset.roll_style]
sid = rf.get_contract_center(asset.root_symbol, dt, asset.offset)
if sid is None:
    return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
<SYSTEM_TASK:> Compute each asset's weight in the portfolio by calculating its held <END_TASK> <USER_TASK:> Description: def current_portfolio_weights(self): """ Compute each asset's weight in the portfolio by calculating its held value divided by the total value of all positions. Each equity's value is its price times the number of shares held. Each futures contract's value is its unit price times number of shares held times the multiplier. """
position_values = pd.Series({
    asset: (
        position.last_sale_price *
        position.amount *
        asset.price_multiplier
    )
    for asset, position in self.positions.items()
})
return position_values / self.portfolio_value
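As a quick numeric illustration of the weighting above, with made-up positions; the asset names, prices, amounts, and multipliers are hypothetical and do not come from zipline objects.

```python
import pandas as pd

# Hypothetical holdings: (last_sale_price, amount, price_multiplier)
positions = {
    "AAPL": (150.0, 10, 1),      # equity: multiplier 1
    "ES_F": (4000.0, 2, 50),     # futures contract: multiplier 50
}

position_values = pd.Series({
    name: price * amount * multiplier
    for name, (price, amount, multiplier) in positions.items()
})
portfolio_value = position_values.sum()   # ignoring cash for simplicity

weights = position_values / portfolio_value
print(weights)   # AAPL ~ 0.004, ES_F ~ 0.996
```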
<SYSTEM_TASK:> Reads and decodes an image from a file object as a Numpy array. <END_TASK> <USER_TASK:> Description: def _decode_image(fobj, session, filename): """Reads and decodes an image from a file object as a Numpy array. The SUN dataset contains images in several formats (despite the fact that all of them have .jpg extension). Some of them are: - BMP (RGB) - PNG (grayscale, RGBA, RGB interlaced) - JPEG (RGB) - GIF (1-frame RGB) Since TFDS assumes that all images have the same number of channels, we convert all of them to RGB. Args: fobj: File object to read from. session: TF session used to decode the images. filename: Filename of the original image in the archive. Returns: Numpy array with shape (height, width, channels). """
buf = fobj.read() image = tfds.core.lazy_imports.cv2.imdecode( np.fromstring(buf, dtype=np.uint8), flags=3) # Note: Converts to RGB. if image is None: logging.warning( "Image %s could not be decoded by OpenCV, falling back to TF", filename) try: image = tf.image.decode_image(buf, channels=3) image = session.run(image) except tf.errors.InvalidArgumentError: logging.fatal("Image %s could not be decoded by Tensorflow", filename) # The GIF images contain a single frame. if len(image.shape) == 4: # rank=4 -> rank=3 image = image.reshape(image.shape[1:]) return image
<SYSTEM_TASK:> Returns examples from parallel SGML or text files, which may be gzipped. <END_TASK> <USER_TASK:> Description: def _parse_parallel_sentences(f1, f2): """Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path): """Returns the sentences from a single text file, which may be gzipped.""" split_path = path.split(".") if split_path[-1] == "gz": lang = split_path[-2] with tf.io.gfile.GFile(path) as f, gzip.GzipFile(fileobj=f) as g: return g.read().split("\n"), lang if split_path[-1] == "txt": # CWMT lang = split_path[-2].split("_")[-1] lang = "zh" if lang in ("ch", "cn") else lang else: lang = split_path[-1] with tf.io.gfile.GFile(path) as f: return f.read().split("\n"), lang def _parse_sgm(path): """Returns sentences from a single SGML file.""" lang = path.split(".")[-2] sentences = [] # Note: We can't use the XML parser since some of the files are badly # formatted. seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>") with tf.io.gfile.GFile(path) as f: for line in f: seg_match = re.match(seg_re, line) if seg_match: assert len(seg_match.groups()) == 1 sentences.append(seg_match.groups()[0]) return sentences, lang parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text # Some datasets (e.g., CWMT) contain multiple parallel files specified with # a wildcard. We sort both sets to align them and parse them one by one. f1_files = tf.io.gfile.glob(f1) f2_files = tf.io.gfile.glob(f2) assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2) assert len(f1_files) == len(f2_files), ( "Number of files do not match: %d vs %d for %s vs %s." % ( len(f1_files), len(f2_files), f1, f2)) for f1_i, f2_i in zip(sorted(f1_files), sorted(f2_files)): l1_sentences, l1 = parse_file(f1_i) l2_sentences, l2 = parse_file(f2_i) assert len(l1_sentences) == len(l2_sentences), ( "Sizes do not match: %d vs %d for %s vs %s." % ( len(l1_sentences), len(l2_sentences), f1_i, f2_i)) for s1, s2 in zip(l1_sentences, l2_sentences): yield { l1: s1, l2: s2 }
<SYSTEM_TASK:> Generates examples from TMX file. <END_TASK> <USER_TASK:> Description: def _parse_tmx(path): """Generates examples from TMX file."""
def _get_tuv_lang(tuv):
  for k, v in tuv.items():
    if k.endswith("}lang"):
      return v
  raise AssertionError("Language not found in `tuv` attributes.")

def _get_tuv_seg(tuv):
  segs = tuv.findall("seg")
  assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
  return segs[0].text

with tf.io.gfile.GFile(path) as f:
  for _, elem in ElementTree.iterparse(f):
    if elem.tag == "tu":
      yield {
          _get_tuv_lang(tuv): _get_tuv_seg(tuv)
          for tuv in elem.iterfind("tuv")
      }
      elem.clear()
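To make the `tu`/`tuv`/`seg` structure concrete, here is a small self-contained sketch that applies the same extraction logic to a hand-written TMX fragment; the fragment and its language codes are invented for illustration.

```python
import xml.etree.ElementTree as ElementTree

tmx = """<tmx version="1.4">
  <body>
    <tu>
      <tuv xml:lang="en"><seg>Hello world.</seg></tuv>
      <tuv xml:lang="de"><seg>Hallo Welt.</seg></tuv>
    </tu>
  </body>
</tmx>"""

def _get_tuv_lang(tuv):
  # xml:lang is namespaced, so the parsed attribute key ends with "}lang".
  for k, v in tuv.items():
    if k.endswith("}lang"):
      return v
  raise AssertionError("Language not found in `tuv` attributes.")

root = ElementTree.fromstring(tmx)
for tu in root.iter("tu"):
  print({_get_tuv_lang(tuv): tuv.find("seg").text for tuv in tu.iterfind("tuv")})
# {'en': 'Hello world.', 'de': 'Hallo Welt.'}
```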
<SYSTEM_TASK:> Generates examples from TSV file. <END_TASK> <USER_TASK:> Description: def _parse_tsv(path, language_pair=None): """Generates examples from TSV file."""
if language_pair is None: lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path) assert lang_match is not None, "Invalid TSV filename: %s" % path l1, l2 = lang_match.groups() else: l1, l2 = language_pair with tf.io.gfile.GFile(path) as f: for j, line in enumerate(f): cols = line.split("\t") if len(cols) != 2: logging.warning( "Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols)) continue s1, s2 = cols yield { l1: s1.strip(), l2: s2.strip() }
<SYSTEM_TASK:> Generates examples from Wikiheadlines dataset file. <END_TASK> <USER_TASK:> Description: def _parse_wikiheadlines(path): """Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with tf.io.gfile.GFile(path) as f:
  for line in f:
    s1, s2 = line.split("|||")
    yield {
        l1: s1.strip(),
        l2: s2.strip()
    }
<SYSTEM_TASK:> Generates examples from CzEng v1.6, with optional filtering for v1.7. <END_TASK> <USER_TASK:> Description: def _parse_czeng(*paths, **kwargs): """Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None) if filter_path: re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]") with tf.io.gfile.GFile(filter_path) as f: bad_blocks = { blk for blk in re.search( r"qw{([\s\d]*)}", f.read()).groups()[0].split() } logging.info( "Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks)) for path in paths: for gz_path in tf.io.gfile.glob(path): with tf.io.gfile.GFile(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f: for line in f: line = line.decode("utf-8") # required for py3 if not line.strip(): continue id_, unused_score, cs, en = line.split("\t") if filter_path: block_match = re.match(re_block, id_) if block_match and block_match.groups()[0] in bad_blocks: continue yield { "cs": cs.strip(), "en": en.strip(), }
<SYSTEM_TASK:> Subsets that make up each split of the dataset for the language pair. <END_TASK> <USER_TASK:> Description: def subsets(self): """Subsets that make up each split of the dataset for the language pair."""
source, target = self.builder_config.language_pair filtered_subsets = {} for split, ss_names in self._subsets.items(): filtered_subsets[split] = [] for ss_name in ss_names: ds = DATASET_MAP[ss_name] if ds.target != target or source not in ds.sources: logging.info( "Skipping sub-dataset that does not include language pair: %s", ss_name) else: filtered_subsets[split].append(ss_name) logging.info("Using sub-datasets: %s", filtered_subsets) return filtered_subsets
<SYSTEM_TASK:> Fetches a `tfds.core.DatasetBuilder` by string name. <END_TASK> <USER_TASK:> Description: def builder(name, **builder_init_kwargs): """Fetches a `tfds.core.DatasetBuilder` by string name. Args: name: `str`, the registered name of the `DatasetBuilder` (the snake case version of the class name). This can be either `"dataset_name"` or `"dataset_name/config_name"` for datasets with `BuilderConfig`s. As a convenience, this string may contain comma-separated keyword arguments for the builder. For example `"foo_bar/a=True,b=3"` would use the `FooBar` dataset passing the keyword arguments `a=True` and `b=3` (for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to use the `"zoo"` config and pass to the builder keyword arguments `a=True` and `b=3`). **builder_init_kwargs: `dict` of keyword arguments passed to the `DatasetBuilder`. These will override keyword arguments passed in `name`, if any. Returns: A `tfds.core.DatasetBuilder`. Raises: DatasetNotFoundError: if `name` is unrecognized. """
name, builder_kwargs = _dataset_name_and_kwargs_from_name_str(name) builder_kwargs.update(builder_init_kwargs) if name in _ABSTRACT_DATASET_REGISTRY: raise DatasetNotFoundError(name, is_abstract=True) if name in _IN_DEVELOPMENT_REGISTRY: raise DatasetNotFoundError(name, in_development=True) if name not in _DATASET_REGISTRY: raise DatasetNotFoundError(name) try: return _DATASET_REGISTRY[name](**builder_kwargs) except BaseException: logging.error("Failed to construct dataset %s", name) raise
<SYSTEM_TASK:> Loads the named dataset into a `tf.data.Dataset`. <END_TASK> <USER_TASK:> Description: def load(name, split=None, data_dir=None, batch_size=1, download=True, as_supervised=False, with_info=False, builder_kwargs=None, download_and_prepare_kwargs=None, as_dataset_kwargs=None, try_gcs=False): """Loads the named dataset into a `tf.data.Dataset`. If `split=None` (the default), returns all splits for the dataset. Otherwise, returns the specified split. `load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by string name, optionally calls `DatasetBuilder.download_and_prepare` (if `download=True`), and then calls `DatasetBuilder.as_dataset`. This is roughly equivalent to: ``` builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs) if download: builder.download_and_prepare(**download_and_prepare_kwargs) ds = builder.as_dataset( split=split, as_supervised=as_supervised, **as_dataset_kwargs) if with_info: return ds, builder.info return ds ``` If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s, you can pass the return value to `tfds.as_numpy`. Callers must pass arguments as keyword arguments. **Warning**: calling this function might potentially trigger the download of hundreds of GiB to disk. Refer to the `download` argument. Args: name: `str`, the registered name of the `DatasetBuilder` (the snake case version of the class name). This can be either `"dataset_name"` or `"dataset_name/config_name"` for datasets with `BuilderConfig`s. As a convenience, this string may contain comma-separated keyword arguments for the builder. For example `"foo_bar/a=True,b=3"` would use the `FooBar` dataset passing the keyword arguments `a=True` and `b=3` (for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to use the `"zoo"` config and pass to the builder keyword arguments `a=True` and `b=3`). split: `tfds.Split` or `str`, which split of the data to load. If None, will return a `dict` with all splits (typically `tfds.Split.TRAIN` and `tfds.Split.TEST`). data_dir: `str` (optional), directory to read/write data. Defaults to "~/tensorflow_datasets". batch_size: `int`, set to > 1 to get batches of examples. Note that variable length features will be 0-padded. If `batch_size=-1`, will return the full dataset as `tf.Tensor`s. download: `bool` (optional), whether to call `tfds.core.DatasetBuilder.download_and_prepare` before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is expected to be in `data_dir`. If `True` and the data is already in `data_dir`, `download_and_prepare` is a no-op. as_supervised: `bool`, if `True`, the returned `tf.data.Dataset` will have a 2-tuple structure `(input, label)` according to `builder.info.supervised_keys`. If `False`, the default, the returned `tf.data.Dataset` will have a dictionary with all the features. with_info: `bool`, if True, tfds.load will return the tuple (tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated with the builder. builder_kwargs: `dict` (optional), keyword arguments to be passed to the `tfds.core.DatasetBuilder` constructor. `data_dir` will be passed through by default. download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to `tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow to control where to download and extract the cached data. If not set, cache_dir and manual_dir will automatically be deduced from data_dir. as_dataset_kwargs: `dict` (optional), keyword arguments passed to `tfds.core.DatasetBuilder.as_dataset`. 
`split` will be passed through by default. Example: `{'shuffle_files': True}`. Note that shuffle_files is False by default unless `split == tfds.Split.TRAIN`. try_gcs: `bool`, if True, tfds.load will see if the dataset exists on the public GCS bucket before building it locally. Returns: ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a `dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`, these will be full datasets as `tf.Tensor`s. ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load` will return a tuple `(ds, ds_info)` containing dataset information (version, features, splits, num_examples,...). Note that the `ds_info` object documents the entire dataset, regardless of the `split` requested. Split-specific information is available in `ds_info.splits`. """
name, name_builder_kwargs = _dataset_name_and_kwargs_from_name_str(name) name_builder_kwargs.update(builder_kwargs or {}) builder_kwargs = name_builder_kwargs # Set data_dir if try_gcs and gcs_utils.is_dataset_on_gcs(name): data_dir = constants.GCS_DATA_DIR elif data_dir is None: data_dir = constants.DATA_DIR dbuilder = builder(name, data_dir=data_dir, **builder_kwargs) if download: download_and_prepare_kwargs = download_and_prepare_kwargs or {} dbuilder.download_and_prepare(**download_and_prepare_kwargs) if as_dataset_kwargs is None: as_dataset_kwargs = {} as_dataset_kwargs = dict(as_dataset_kwargs) as_dataset_kwargs["split"] = split as_dataset_kwargs["as_supervised"] = as_supervised as_dataset_kwargs["batch_size"] = batch_size ds = dbuilder.as_dataset(**as_dataset_kwargs) if with_info: return ds, dbuilder.info return ds
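For reference, a typical call to the public API described above, using the MNIST dataset as an example:

```python
import tensorflow_datasets as tfds

# Load the train split of MNIST as (image, label) tuples, along with its metadata.
ds, info = tfds.load(
    "mnist",
    split="train",
    as_supervised=True,   # 2-tuples (input, label) instead of feature dicts
    with_info=True,
)
print(info.features)      # feature spec: image shape/dtype, number of label classes

for image, label in tfds.as_numpy(ds.take(1)):
  print(image.shape, label)
```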
<SYSTEM_TASK:> Extract kwargs from name str. <END_TASK> <USER_TASK:> Description: def _dataset_name_and_kwargs_from_name_str(name_str): """Extract kwargs from name str."""
res = _NAME_REG.match(name_str) if not res: raise ValueError(_NAME_STR_ERR.format(name_str)) name = res.group("dataset_name") kwargs = _kwargs_str_to_kwargs(res.group("kwargs")) try: for attr in ["config", "version"]: val = res.group(attr) if val is None: continue if attr in kwargs: raise ValueError("Dataset %s: cannot pass %s twice." % (name, attr)) kwargs[attr] = val return name, kwargs except: logging.error(_NAME_STR_ERR.format(name_str)) # pylint: disable=logging-format-interpolation raise
<SYSTEM_TASK:> Try cast to int, float, bool, str, in that order. <END_TASK> <USER_TASK:> Description: def _cast_to_pod(val): """Try cast to int, float, bool, str, in that order."""
bools = {"True": True, "False": False}
if val in bools:
  return bools[val]
try:
  return int(val)
except ValueError:
  try:
    return float(val)
  except ValueError:
    return tf.compat.as_text(val)
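Note that `"True"`/`"False"` are matched before the numeric casts, so the effective order is bool, int, float, str. A quick sanity check, assuming `_cast_to_pod` is importable from this module:

```python
assert _cast_to_pod("True") is True       # bools are matched first
assert _cast_to_pod("3") == 3             # then int
assert _cast_to_pod("3.5") == 3.5         # then float
assert _cast_to_pod("zoo") == "zoo"       # otherwise returned as text
```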
<SYSTEM_TASK:> Try importing a module, with an informative error message on failure. <END_TASK> <USER_TASK:> Description: def _try_import(module_name): """Try importing a module, with an informative error message on failure."""
try:
  mod = importlib.import_module(module_name)
  return mod
except ImportError:
  err_msg = ("Tried importing %s but failed. See setup.py extras_require. "
             "The dataset you are trying to use may have additional "
             "dependencies.")
  utils.reraise(err_msg % module_name)
<SYSTEM_TASK:> Returns list from list, tuple or ndarray. <END_TASK> <USER_TASK:> Description: def np_to_list(elem): """Returns list from list, tuple or ndarray."""
if isinstance(elem, list):
  return elem
elif isinstance(elem, tuple):
  return list(elem)
elif isinstance(elem, np.ndarray):
  return list(elem)
else:
  raise ValueError(
      'Input elements of a sequence should be either a numpy array, a '
      'python list or tuple. Got {}'.format(type(elem)))
<SYSTEM_TASK:> Generate MNIST examples as dicts. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, num_examples, data_path, label_path): """Generate MNIST examples as dicts. Args: num_examples (int): The number of example. data_path (str): Path to the data files label_path (str): Path to the labels Yields: Generator yielding the next examples """
images = _extract_mnist_images(data_path, num_examples)
labels = _extract_mnist_labels(label_path, num_examples)
data = list(zip(images, labels))

# Data is shuffled automatically to distribute classes uniformly.
for image, label in data:
  yield {
      "image": image,
      "label": label,
  }
<SYSTEM_TASK:> Calculate statistics for the specified split. <END_TASK> <USER_TASK:> Description: def get_dataset_feature_statistics(builder, split): """Calculate statistics for the specified split."""
statistics = statistics_pb2.DatasetFeatureStatistics() # Make this to the best of our abilities. schema = schema_pb2.Schema() dataset = builder.as_dataset(split=split) # Just computing the number of examples for now. statistics.num_examples = 0 # Feature dictionaries. feature_to_num_examples = collections.defaultdict(int) feature_to_min = {} feature_to_max = {} np_dataset = dataset_utils.as_numpy(dataset) for example in utils.tqdm(np_dataset, unit=" examples", leave=False): statistics.num_examples += 1 assert isinstance(example, dict) feature_names = sorted(example.keys()) for feature_name in feature_names: # Update the number of examples this feature appears in. feature_to_num_examples[feature_name] += 1 feature_np = example[feature_name] # For compatibility in graph and eager mode, we can get PODs here and # everything may not be neatly wrapped up in numpy's ndarray. feature_dtype = type(feature_np) if isinstance(feature_np, np.ndarray): # If we have an empty array, then don't proceed further with computing # statistics on it. if feature_np.size == 0: continue feature_dtype = feature_np.dtype.type feature_min, feature_max = None, None is_numeric = (np.issubdtype(feature_dtype, np.number) or feature_dtype == np.bool_) if is_numeric: feature_min = np.min(feature_np) feature_max = np.max(feature_np) # TODO(afrozm): What if shapes don't match? Populate ValueCount? Add # logic for that. # Set or update the min, max. if is_numeric: if ((feature_name not in feature_to_min) or (feature_to_min[feature_name] > feature_min)): feature_to_min[feature_name] = feature_min if ((feature_name not in feature_to_max) or (feature_to_max[feature_name] < feature_max)): feature_to_max[feature_name] = feature_max # Start here, we've processed all examples. output_shapes_dict = dataset.output_shapes output_types_dict = dataset.output_types for feature_name in sorted(feature_to_num_examples.keys()): # Try to fill in the schema. feature = schema.feature.add() feature.name = feature_name # TODO(afrozm): Make this work with nested structures, currently the Schema # proto has no support for it. maybe_feature_shape = output_shapes_dict[feature_name] if not isinstance(maybe_feature_shape, tf.TensorShape): logging.error( "Statistics generation doesn't work for nested structures yet") continue for dim in maybe_feature_shape.as_list(): # We denote `None`s as -1 in the shape proto. feature.shape.dim.add().size = dim if dim else -1 feature_type = output_types_dict[feature_name] feature.type = _FEATURE_TYPE_MAP.get(feature_type, schema_pb2.BYTES) common_statistics = statistics_pb2.CommonStatistics() common_statistics.num_non_missing = feature_to_num_examples[feature_name] common_statistics.num_missing = ( statistics.num_examples - common_statistics.num_non_missing) feature_name_statistics = statistics.features.add() feature_name_statistics.name = feature_name # TODO(afrozm): This can be skipped, since type information was added to # the Schema. feature_name_statistics.type = _SCHEMA_TYPE_MAP.get( feature.type, statistics_pb2.FeatureNameStatistics.BYTES) if feature.type == schema_pb2.INT or feature.type == schema_pb2.FLOAT: numeric_statistics = statistics_pb2.NumericStatistics() numeric_statistics.min = feature_to_min[feature_name] numeric_statistics.max = feature_to_max[feature_name] numeric_statistics.common_stats.CopyFrom(common_statistics) feature_name_statistics.num_stats.CopyFrom(numeric_statistics) else: # Let's shove it into BytesStatistics for now. 
bytes_statistics = statistics_pb2.BytesStatistics() bytes_statistics.common_stats.CopyFrom(common_statistics) feature_name_statistics.bytes_stats.CopyFrom(bytes_statistics) return statistics, schema
<SYSTEM_TASK:> Overwrite the splits if they are different from the current ones. <END_TASK> <USER_TASK:> Description: def update_splits_if_different(self, split_dict): """Overwrite the splits if they are different from the current ones. * If splits aren't already defined or different (ex: different number of shards), then the new split dict is used. This will trigger stats computation during download_and_prepare. * If splits are already defined in DatasetInfo and similar (same names and shards): keep the restored split which contains the statistics (restored from GCS or file) Args: split_dict: `tfds.core.SplitDict`, the new split """
assert isinstance(split_dict, splits_lib.SplitDict) # If splits are already defined and identical, then we do not update if self._splits and splits_lib.check_splits_equals( self._splits, split_dict): return self._set_splits(split_dict)
<SYSTEM_TASK:> Update DatasetInfo from the JSON file in `dataset_info_dir`. <END_TASK> <USER_TASK:> Description: def read_from_directory(self, dataset_info_dir): """Update DatasetInfo from the JSON file in `dataset_info_dir`. This function updates all the dynamically generated fields (num_examples, hash, time of creation,...) of the DatasetInfo. This will overwrite all previous metadata. Args: dataset_info_dir: `str` The directory containing the metadata file. This should be the root directory of a specific dataset version. """
if not dataset_info_dir: raise ValueError( "Calling read_from_directory with undefined dataset_info_dir.") json_filename = self._dataset_info_filename(dataset_info_dir) # Load the metadata from disk parsed_proto = read_from_json(json_filename) # Update splits self._set_splits(splits_lib.SplitDict.from_proto(parsed_proto.splits)) # Restore the feature metadata (vocabulary, labels names,...) if self.features: self.features.load_metadata(dataset_info_dir) # Update fields which are not defined in the code. This means that # the code will overwrite fields which are present in # dataset_info.json. for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items(): field_value = getattr(self._info_proto, field_name) field_value_restored = getattr(parsed_proto, field_name) try: is_defined = self._info_proto.HasField(field_name) except ValueError: is_defined = bool(field_value) try: is_defined_in_restored = parsed_proto.HasField(field_name) except ValueError: is_defined_in_restored = bool(field_value_restored) # If field is defined in code, we ignore the value if is_defined: if field_value != field_value_restored: logging.info( "Field info.%s from disk and from code do not match. Keeping " "the one from code.", field_name) continue # If the field is also not defined in JSON file, we do nothing if not is_defined_in_restored: continue # Otherwise, we restore the dataset_info.json value if field.type == field.TYPE_MESSAGE: field_value.MergeFrom(field_value_restored) else: setattr(self._info_proto, field_name, field_value_restored) if self._builder._version != self.version: # pylint: disable=protected-access raise AssertionError( "The constructed DatasetInfo instance and the restored proto version " "do not match. Builder version: {}. Proto version: {}".format( self._builder._version, self.version)) # pylint: disable=protected-access # Mark as fully initialized. self._fully_initialized = True
<SYSTEM_TASK:> Map the function into each element and resolve the promise. <END_TASK> <USER_TASK:> Description: def _map_promise(map_fn, all_inputs): """Map the function into each element and resolve the promise."""
all_promises = utils.map_nested(map_fn, all_inputs)  # Apply the function
res = utils.map_nested(_wait_on_promise, all_promises)
return res
<SYSTEM_TASK:> Store dled file to definitive place, write INFO file, return path. <END_TASK> <USER_TASK:> Description: def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size): """Store dled file to definitive place, write INFO file, return path."""
fnames = tf.io.gfile.listdir(tmp_dir_path) if len(fnames) > 1: raise AssertionError('More than one file in %s.' % tmp_dir_path) original_fname = fnames[0] tmp_path = os.path.join(tmp_dir_path, original_fname) self._recorded_sizes_checksums[resource.url] = (dl_size, sha256) if self._register_checksums: self._record_sizes_checksums() elif (dl_size, sha256) != self._sizes_checksums.get(resource.url, None): raise NonMatchingChecksumError(resource.url, tmp_path) download_path = self._get_final_dl_path(resource.url, sha256) resource_lib.write_info_file(resource, download_path, self._dataset_name, original_fname) # Unconditionally overwrite because either file doesn't exist or # FORCE_DOWNLOAD=true tf.io.gfile.rename(tmp_path, download_path, overwrite=True) tf.io.gfile.rmtree(tmp_dir_path) return download_path
<SYSTEM_TASK:> Download data for a given Kaggle competition. <END_TASK> <USER_TASK:> Description: def download_kaggle_data(self, competition_name): """Download data for a given Kaggle competition."""
with self._downloader.tqdm():
  kaggle_downloader = self._downloader.kaggle_downloader(competition_name)
  urls = kaggle_downloader.competition_urls
  files = kaggle_downloader.competition_files
  return _map_promise(self._download,
                      dict((f, u) for (f, u) in zip(files, urls)))
<SYSTEM_TASK:> Returns iterator over files within archive. <END_TASK> <USER_TASK:> Description: def iter_archive(self, resource): """Returns iterator over files within archive. **Important Note**: caller should read files as they are yielded. Reading out of order is slow. Args: resource: path to archive or `tfds.download.Resource`. Returns: Generator yielding tuple (path_within_archive, file_obj). """
if isinstance(resource, six.string_types):
  resource = resource_lib.Resource(path=resource)
return extractor.iter_archive(resource.path, resource.extract_method)
<SYSTEM_TASK:> Download and extract given url_or_urls. <END_TASK> <USER_TASK:> Description: def download_and_extract(self, url_or_urls): """Download and extract given url_or_urls. Is roughly equivalent to: ``` extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls)) ``` Args: url_or_urls: url or `list`/`dict` of urls to download and extract. Each url can be a `str` or `tfds.download.Resource`. If not explicitly specified in `Resource`, the extraction method will automatically be deduced from downloaded file name. Returns: extracted_path(s): `str`, extracted paths of given URL(s). """
# Add progress bar to follow the download state
with self._downloader.tqdm():
  with self._extractor.tqdm():
    return _map_promise(self._download_extract, url_or_urls)
<SYSTEM_TASK:> Returns the directory containing the manually extracted data. <END_TASK> <USER_TASK:> Description: def manual_dir(self): """Returns the directory containing the manually extracted data."""
if not tf.io.gfile.exists(self._manual_dir):
  raise AssertionError(
      'Manual directory {} does not exist. Create it and download/extract '
      'dataset artifacts in there.'.format(self._manual_dir))
return self._manual_dir
<SYSTEM_TASK:> Generate corrupted Cifar10 test data. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, data_dir): """Generate corrupted Cifar10 test data. Apply corruptions to the raw images according to self.corruption_type. Args: data_dir: root directory of downloaded dataset Yields: dictionary with image file and label. """
corruption = self.builder_config.corruption severity = self.builder_config.severity images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption]) labels_file = os.path.join(data_dir, _LABELS_FILENAME) with tf.io.gfile.GFile(labels_file, mode='rb') as f: labels = np.load(f) num_images = labels.shape[0] // 5 # Labels are stacked 5 times so we can just read the first iteration labels = labels[:num_images] with tf.io.gfile.GFile(images_file, mode='rb') as f: images = np.load(f) # Slice images corresponding to correct severity level images = images[(severity - 1) * num_images:severity * num_images] for image, label in zip(images, labels): yield { 'image': image, 'label': label, }
<SYSTEM_TASK:> Doc string for a single builder, with or without configs. <END_TASK> <USER_TASK:> Description: def document_single_builder(builder): """Doc string for a single builder, with or without configs."""
mod_name = builder.__class__.__module__ cls_name = builder.__class__.__name__ mod_file = sys.modules[mod_name].__file__ if mod_file.endswith("pyc"): mod_file = mod_file[:-1] description_prefix = "" if builder.builder_configs: # Dataset with configs; document each one config_docs = [] for config in builder.BUILDER_CONFIGS: builder = tfds.builder(builder.name, config=config) info = builder.info # TODO(rsepassi): document the actual config object config_doc = SINGLE_CONFIG_ENTRY.format( builder_name=builder.name, config_name=config.name, description=config.description, version=config.version, feature_information=make_feature_information(info), size=tfds.units.size_str(info.size_in_bytes), ) config_docs.append(config_doc) out_str = DATASET_WITH_CONFIGS_ENTRY.format( snakecase_name=builder.name, module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name), cls_url=cls_url(mod_name), config_names="\n".join([ CONFIG_BULLET.format(name=config.name, description=config.description, version=config.version, size=tfds.units.size_str(tfds.builder( builder.name, config=config) .info.size_in_bytes)) for config in builder.BUILDER_CONFIGS]), config_cls="%s.%s" % (tfds_mod_name(mod_name), type(builder.builder_config).__name__), configs="\n".join(config_docs), urls=format_urls(info.urls), url=url_from_info(info), supervised_keys=str(info.supervised_keys), citation=make_citation(info.citation), statistics_information=make_statistics_information(info), description=builder.info.description, description_prefix=description_prefix, ) else: info = builder.info out_str = DATASET_ENTRY.format( snakecase_name=builder.name, module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name), cls_url=cls_url(mod_name), description=info.description, description_prefix=description_prefix, version=info.version, feature_information=make_feature_information(info), statistics_information=make_statistics_information(info), urls=format_urls(info.urls), url=url_from_info(info), supervised_keys=str(info.supervised_keys), citation=make_citation(info.citation), size=tfds.units.size_str(info.size_in_bytes), ) out_str = schema_org(builder) + "\n" + out_str return out_str
<SYSTEM_TASK:> Get all builders organized by module in nested dicts. <END_TASK> <USER_TASK:> Description: def make_module_to_builder_dict(datasets=None): """Get all builders organized by module in nested dicts."""
# pylint: disable=g-long-lambda # dict to hold tfds->image->mnist->[builders] module_to_builder = collections.defaultdict( lambda: collections.defaultdict( lambda: collections.defaultdict(list))) # pylint: enable=g-long-lambda if datasets: builders = [tfds.builder(name) for name in datasets] else: builders = [ tfds.builder(name) for name in tfds.list_builders() if name not in BUILDER_BLACKLIST ] + [tfds.builder("image_label_folder", dataset_name="image_label_folder")] for builder in builders: mod_name = builder.__class__.__module__ modules = mod_name.split(".") if "testing" in modules: continue current_mod_ctr = module_to_builder for mod in modules: current_mod_ctr = current_mod_ctr[mod] current_mod_ctr.append(builder) module_to_builder = module_to_builder["tensorflow_datasets"] return module_to_builder
<SYSTEM_TASK:> Create dataset documentation string for given datasets. <END_TASK> <USER_TASK:> Description: def dataset_docs_str(datasets=None): """Create dataset documentation string for given datasets. Args: datasets: list of datasets for which to create documentation. If None, then all available datasets will be used. Returns: string describing the datasets (in the MarkDown format). """
module_to_builder = make_module_to_builder_dict(datasets) sections = sorted(list(module_to_builder.keys())) section_tocs = [] section_docs = [] for section in sections: builders = tf.nest.flatten(module_to_builder[section]) builders = sorted(builders, key=lambda b: b.name) builder_docs = [document_single_builder(builder) for builder in builders] section_doc = SECTION_DATASETS.format( section_name=section, datasets="\n".join(builder_docs)) section_toc = create_section_toc(section, builders) section_docs.append(section_doc) section_tocs.append(section_toc) full_doc = DOC.format(toc="\n".join(section_tocs), datasets="\n".join(section_docs)) return full_doc
<SYSTEM_TASK:> Generating a Gaussian blurring kernel with disk shape. <END_TASK> <USER_TASK:> Description: def disk(radius, alias_blur=0.1, dtype=np.float32): """Generating a Gaussian blurring kernel with disk shape. Generating a Gaussian blurring kernel with disk shape using cv2 API. Args: radius: integer, radius of blurring kernel. alias_blur: float, standard deviation of Gaussian blurring. dtype: data type of kernel Returns: cv2 object of the Gaussian blurring kernel. """
if radius <= 8: length = np.arange(-8, 8 + 1) ksize = (3, 3) else: length = np.arange(-radius, radius + 1) ksize = (5, 5) x_axis, y_axis = np.meshgrid(length, length) aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype) aliased_disk /= np.sum(aliased_disk) # supersample disk to antialias return tfds.core.lazy_imports.cv2.GaussianBlur( aliased_disk, ksize=ksize, sigmaX=alias_blur)
<SYSTEM_TASK:> Zoom image with clipping. <END_TASK> <USER_TASK:> Description: def clipped_zoom(img, zoom_factor): """Zoom image with clipping. Zoom the central part of the image and clip extra pixels. Args: img: numpy array, uncorrupted image. zoom_factor: numpy array, a sequence of float numbers for zoom factor. Returns: numpy array, zoomed image after clipping. """
h = img.shape[0] ch = int(np.ceil(h / float(zoom_factor))) top_h = (h - ch) // 2 w = img.shape[1] cw = int(np.ceil(w / float(zoom_factor))) top_w = (w - cw) // 2 img = tfds.core.lazy_imports.scipy.ndimage.zoom( img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1), order=1) # trim off any extra pixels trim_top_h = (img.shape[0] - h) // 2 trim_top_w = (img.shape[1] - w) // 2 return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
<SYSTEM_TASK:> Generate a heightmap using diamond-square algorithm. <END_TASK> <USER_TASK:> Description: def plasma_fractal(mapsize=512, wibbledecay=3): """Generate a heightmap using diamond-square algorithm. Modification of the algorithm in https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py Args: mapsize: side length of the heightmap, must be a power of two. wibbledecay: integer, decay factor. Returns: numpy 2d array, side length 'mapsize', of floats in [0,255]. """
if mapsize & (mapsize - 1) != 0: raise ValueError('mapsize must be a power of two.') maparray = np.empty((mapsize, mapsize), dtype=np.float_) maparray[0, 0] = 0 stepsize = mapsize wibble = 100 def wibbledmean(array): return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape) def fillsquares(): """For each square, calculate middle value as mean of points + wibble.""" cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize] squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0) squareaccum += np.roll(squareaccum, shift=-1, axis=1) maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum) def filldiamonds(): """For each diamond, calculate middle value as meanof points + wibble.""" mapsize = maparray.shape[0] drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize] ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize] ldrsum = drgrid + np.roll(drgrid, 1, axis=0) lulsum = ulgrid + np.roll(ulgrid, -1, axis=1) ltsum = ldrsum + lulsum maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum) tdrsum = drgrid + np.roll(drgrid, 1, axis=1) tulsum = ulgrid + np.roll(ulgrid, -1, axis=0) ttsum = tdrsum + tulsum maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum) while stepsize >= 2: fillsquares() filldiamonds() stepsize //= 2 wibble /= wibbledecay maparray -= maparray.min() return maparray / maparray.max()
<SYSTEM_TASK:> Gaussian noise corruption to images. <END_TASK> <USER_TASK:> Description: def gaussian_noise(x, severity=1): """Gaussian noise corruption to images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise. """
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return around_and_astype(x_clip)
<SYSTEM_TASK:> Shot noise corruption to images. <END_TASK> <USER_TASK:> Description: def shot_noise(x, severity=1): """Shot noise corruption to images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Added shot noise. """
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return around_and_astype(x_clip)
<SYSTEM_TASK:> Impulse noise corruption to images. <END_TASK> <USER_TASK:> Description: def impulse_noise(x, severity=1): """Impulse noise corruption to images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Added impulse noise. """
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
    np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
<SYSTEM_TASK:> Defocus blurring to images. <END_TASK> <USER_TASK:> Description: def defocus_blur(x, severity=1): """Defocus blurring to images. Apply defocus blurring to images using Gaussian kernel. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Applied defocus blur. """
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1] x = np.array(x) / 255. kernel = disk(radius=c[0], alias_blur=c[1]) channels = [] for d in range(3): channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel)) channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3 x_clip = np.clip(channels, 0, 1) * 255 return around_and_astype(x_clip)
<SYSTEM_TASK:> Frosted glass blurring to images. <END_TASK> <USER_TASK:> Description: def frosted_glass_blur(x, severity=1): """Frosted glass blurring to images. Apply frosted glass blurring to images by shuffling pixels locally. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur. """
# sigma, max_delta, iterations c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1] x = np.uint8( tfds.core.lazy_imports.skimage.filters.gaussian( np.array(x) / 255., sigma=c[0], multichannel=True) * 255) # locally shuffle pixels for _ in range(c[2]): for h in range(x.shape[0] - c[1], c[1], -1): for w in range(x.shape[1] - c[1], c[1], -1): dx, dy = np.random.randint(-c[1], c[1], size=(2,)) h_prime, w_prime = h + dy, w + dx # swap x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w] x_clip = np.clip( tfds.core.lazy_imports.skimage.filters.gaussian( x / 255., sigma=c[0], multichannel=True), 0, 1) x_clip *= 255 return around_and_astype(x_clip)
<SYSTEM_TASK:> Zoom blurring to images. <END_TASK> <USER_TASK:> Description: def zoom_blur(x, severity=1): """Zoom blurring to images. Applying zoom blurring to images by zooming the central part of the images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Applied zoom blur. """
c = [ np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01), np.arange(1, 1.21, 0.02), np.arange(1, 1.26, 0.02), np.arange(1, 1.31, 0.03) ][severity - 1] x = (np.array(x) / 255.).astype(np.float32) out = np.zeros_like(x) for zoom_factor in c: out += clipped_zoom(x, zoom_factor) x = (x + out) / (len(c) + 1) x_clip = np.clip(x, 0, 1) * 255 return around_and_astype(x_clip)
<SYSTEM_TASK:> Fog corruption to images. <END_TASK> <USER_TASK:> Description: def fog(x, severity=1): """Fog corruption to images. Adding fog to images. Fog is generated by diamond-square algorithm. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Added fog. """
c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1] x = np.array(x) / 255. max_val = x.max() mapsize = 512 shape = x.shape max_length = max(shape[0], shape[1]) if max_length > mapsize: mapsize = 2**int(np.ceil(np.log2(float(max_length)))) tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1]) tmp = tmp[:x.shape[0], :x.shape[1]] tmp = tmp[..., np.newaxis] x += c[0] * tmp x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255 return around_and_astype(x_clip)
<SYSTEM_TASK:> Change brightness of images. <END_TASK> <USER_TASK:> Description: def brightness(x, severity=1): """Change brightness of images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Changed brightness. """
c = [.1, .2, .3, .4, .5][severity - 1]

x = np.array(x) / 255.
x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
<SYSTEM_TASK:> Change contrast of images. <END_TASK> <USER_TASK:> Description: def contrast(x, severity=1): """Change contrast of images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Changed contrast. """
c = [0.4, .3, .2, .1, .05][severity - 1]

x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip)
<SYSTEM_TASK:> Pixelate images. <END_TASK> <USER_TASK:> Description: def pixelate(x, severity=1): """Pixelate images. Conduct pixelating corruptions to images by first shrinking the images and then resizing to original size. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Applied pixelating corruption. """
c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
shape = x.shape
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
x = x.resize((int(shape[1] * c), int(shape[0] * c)))
x = x.resize((shape[1], shape[0]))
return np.asarray(x)
<SYSTEM_TASK:> Conduct jpeg compression to images. <END_TASK> <USER_TASK:> Description: def jpeg_compression(x, severity=1): """Conduct jpeg compression to images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression. """
c = [25, 18, 15, 10, 7][severity - 1]
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
output = io.BytesIO()
x.save(output, 'JPEG', quality=c)
output.seek(0)
x = tfds.core.lazy_imports.PIL_Image.open(output)
return np.asarray(x)
<SYSTEM_TASK:> Temporarily assign obj.attr to value. <END_TASK> <USER_TASK:> Description: def temporary_assignment(obj, attr, value): """Temporarily assign obj.attr to value."""
original = getattr(obj, attr, None)
setattr(obj, attr, value)
yield
setattr(obj, attr, original)
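Because the body yields once and restores the attribute afterwards, this reads as a context-manager body (presumably decorated with `contextlib.contextmanager` elsewhere). A minimal self-contained sketch of the same pattern:

```python
import contextlib

@contextlib.contextmanager
def temporary_assignment(obj, attr, value):
  """Temporarily assign obj.attr to value."""
  original = getattr(obj, attr, None)
  setattr(obj, attr, value)
  yield
  setattr(obj, attr, original)

class Config(object):   # illustrative object, not part of the library
  verbose = False

cfg = Config()
with temporary_assignment(cfg, "verbose", True):
  assert cfg.verbose is True
assert cfg.verbose is False   # restored on exit
```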
<SYSTEM_TASK:> Iterate over items of dictionaries grouped by their keys. <END_TASK> <USER_TASK:> Description: def zip_dict(*dicts): """Iterate over items of dictionaries grouped by their keys."""
for key in set(itertools.chain(*dicts)):  # merge all keys
  # Will raise KeyError if the dicts don't have the same keys.
  yield key, tuple(d[key] for d in dicts)
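For example, a minimal usage sketch, assuming `zip_dict` is in scope:

```python
d1 = {"a": 1, "b": 2}
d2 = {"a": 10, "b": 20}
print(dict(zip_dict(d1, d2)))   # {'a': (1, 10), 'b': (2, 20)}
```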
<SYSTEM_TASK:> Apply a function recursively to each element of a nested data struct. <END_TASK> <USER_TASK:> Description: def map_nested(function, data_struct, dict_only=False, map_tuple=False): """Apply a function recursively to each element of a nested data struct."""
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, dict):
  return {
      k: map_nested(function, v, dict_only, map_tuple)
      for k, v in data_struct.items()
  }
elif not dict_only:
  types = [list]
  if map_tuple:
    types.append(tuple)
  if isinstance(data_struct, tuple(types)):
    mapped = [map_nested(function, v, dict_only, map_tuple)
              for v in data_struct]
    if isinstance(data_struct, list):
      return mapped
    else:
      return tuple(mapped)
# Singleton
return function(data_struct)
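A short usage sketch, assuming `map_nested` is in scope:

```python
nested = {"train": [1, 2, 3], "test": [4, 5]}
print(map_nested(lambda x: x * 10, nested))
# {'train': [10, 20, 30], 'test': [40, 50]}

# With dict_only=True, lists are treated as leaves and passed whole to the function.
print(map_nested(len, nested, dict_only=True))
# {'train': 3, 'test': 2}
```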
<SYSTEM_TASK:> Zip data struct together and return a data struct with the same shape. <END_TASK> <USER_TASK:> Description: def zip_nested(arg0, *args, **kwargs): """Zip data struct together and return a data struct with the same shape."""
# Python 2 does not support keyword-only arguments.
dict_only = kwargs.pop("dict_only", False)
assert not kwargs

# Could add support for more exotic data_struct, like OrderedDict
if isinstance(arg0, dict):
  return {
      k: zip_nested(*a, dict_only=dict_only)
      for k, a in zip_dict(arg0, *args)
  }
elif not dict_only:
  if isinstance(arg0, list):
    return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)]
# Singleton
return (arg0,) + args
<SYSTEM_TASK:> Simulate proto inheritance. <END_TASK> <USER_TASK:> Description: def as_proto_cls(proto_cls): """Simulate proto inheritance. By default, protobuf do not support direct inheritance, so this decorator simulates inheritance to the class to which it is applied. Example: ``` @as_proto_class(proto.MyProto) class A(object): def custom_method(self): return self.proto_field * 10 p = proto.MyProto(proto_field=123) a = A() a.CopyFrom(p) # a is like a proto object assert a.proto_field == 123 a.custom_method() # But has additional methods ``` Args: proto_cls: The protobuf class to inherit from Returns: decorated_cls: The decorated class """
def decorator(cls):
  """Decorator applied to the class."""

  class ProtoCls(object):
    """Base class simulating the protobuf."""

    def __init__(self, *args, **kwargs):
      super(ProtoCls, self).__setattr__(
          "_ProtoCls__proto",
          proto_cls(*args, **kwargs),
      )

    def __getattr__(self, attr_name):
      return getattr(self.__proto, attr_name)

    def __setattr__(self, attr_name, new_value):
      try:
        return setattr(self.__proto, attr_name, new_value)
      except AttributeError:
        return super(ProtoCls, self).__setattr__(attr_name, new_value)

    def __eq__(self, other):
      # Compare the wrapped protos for equality.
      return self.__proto == other.get_proto()

    def get_proto(self):
      return self.__proto

    def __repr__(self):
      return "<{cls_name}\n{proto_repr}\n>".format(
          cls_name=cls.__name__, proto_repr=repr(self.__proto))

  decorator_cls = type(cls.__name__, (cls, ProtoCls), {
      "__doc__": cls.__doc__,
  })
  return decorator_cls
return decorator
<SYSTEM_TASK:> Writes to path atomically, by writing to temp file and renaming it. <END_TASK> <USER_TASK:> Description: def atomic_write(path, mode): """Writes to path atomically, by writing to temp file and renaming it."""
tmp_path = "%s%s_%s" % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex)
with tf.io.gfile.GFile(tmp_path, mode) as file_:
  yield file_
tf.io.gfile.rename(tmp_path, path, overwrite=True)
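Like `temporary_assignment` above, this body yields once, so it is presumably exposed as a context manager. A hedged usage sketch with a hypothetical path:

```python
import json

# Assumes atomic_write is wrapped with contextlib.contextmanager elsewhere.
with atomic_write("/tmp/results.json", "w") as f:   # hypothetical path
  json.dump({"accuracy": 0.9}, f)
# The payload is written to "<path><INCOMPLETE_SUFFIX>_<uuid>" first and only
# renamed to the final path after the block exits, so readers never observe a
# partially written file.
```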
<SYSTEM_TASK:> Given a hash constructor, returns checksum digest and size of file. <END_TASK> <USER_TASK:> Description: def read_checksum_digest(path, checksum_cls=hashlib.sha256): """Given a hash constructor, returns checksum digest and size of file."""
checksum = checksum_cls()
size = 0
with tf.io.gfile.GFile(path, "rb") as f:
  while True:
    block = f.read(io.DEFAULT_BUFFER_SIZE)
    size += len(block)
    if not block:
      break
    checksum.update(block)
return checksum.hexdigest(), size
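Usage is straightforward; for example, with a hypothetical file and assuming the function is in scope:

```python
digest, num_bytes = read_checksum_digest("/tmp/archive.zip")   # hypothetical file
print(digest)      # hex SHA-256 of the file contents
print(num_bytes)   # file size in bytes

# A different hash can be swapped in via the constructor argument:
import hashlib
md5_digest, _ = read_checksum_digest("/tmp/archive.zip", checksum_cls=hashlib.md5)
```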
<SYSTEM_TASK:> Reraise an exception with an additional message. <END_TASK> <USER_TASK:> Description: def reraise(additional_msg): """Reraise an exception with an additional message."""
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = str(exc_value) + "\n" + additional_msg
six.reraise(exc_type, exc_type(msg), exc_traceback)
<SYSTEM_TASK:> Get attr that handles dots in attr name. <END_TASK> <USER_TASK:> Description: def rgetattr(obj, attr, *args): """Get attr that handles dots in attr name."""
def _getattr(obj, attr):
  return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split("."))
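For example, a minimal sketch assuming `rgetattr` is in scope:

```python
class Inner(object):
  value = 42

class Outer(object):
  inner = Inner()

assert rgetattr(Outer(), "inner.value") == 42
assert rgetattr(Outer(), "inner.missing", None) is None   # default, like getattr
```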
<SYSTEM_TASK:> Update the encoding format. <END_TASK> <USER_TASK:> Description: def set_encoding_format(self, encoding_format): """Update the encoding format."""
supported = ENCODE_FN.keys()
if encoding_format not in supported:
  raise ValueError('`encoding_format` must be one of %s.' % supported)
self._encoding_format = encoding_format
<SYSTEM_TASK:> Returns np_image encoded as jpeg or png. <END_TASK> <USER_TASK:> Description: def _encode_image(self, np_image): """Returns np_image encoded as jpeg or png."""
if np_image.dtype != np.uint8:
  raise ValueError('Image should be uint8. Detected: %s.' % np_image.dtype)
utils.assert_shape_match(np_image.shape, self._shape)
return self._runner.run(ENCODE_FN[self._encoding_format], np_image)
<SYSTEM_TASK:> Convert the given image into a dict convertible to tf example. <END_TASK> <USER_TASK:> Description: def encode_example(self, image_or_path_or_fobj): """Convert the given image into a dict convertible to tf example."""
if isinstance(image_or_path_or_fobj, np.ndarray):
  encoded_image = self._encode_image(image_or_path_or_fobj)
elif isinstance(image_or_path_or_fobj, six.string_types):
  with tf.io.gfile.GFile(image_or_path_or_fobj, 'rb') as image_f:
    encoded_image = image_f.read()
else:
  encoded_image = image_or_path_or_fobj.read()
return encoded_image
<SYSTEM_TASK:> Reconstruct the image from the tf example. <END_TASK> <USER_TASK:> Description: def decode_example(self, example): """Reconstruct the image from the tf example."""
img = tf.image.decode_image(
    example, channels=self._shape[-1], dtype=tf.uint8)
img.set_shape(self._shape)
return img
<SYSTEM_TASK:> Create a moving image sequence from the given image a left padding values. <END_TASK> <USER_TASK:> Description: def _create_moving_sequence(image, pad_lefts, total_padding): """Create a moving image sequence from the given image a left padding values. Args: image: [in_h, in_w, n_channels] uint8 array pad_lefts: [sequence_length, 2] int32 array of left padding values total_padding: tensor of padding values, (pad_h, pad_w) Returns: [sequence_length, out_h, out_w, n_channels] uint8 image sequence, where out_h = in_h + pad_h, out_w = in_w + out_w """
with tf.name_scope("moving_sequence"): def get_padded_image(args): pad_left, = args pad_right = total_padding - pad_left padding = tf.stack([pad_left, pad_right], axis=-1) z = tf.zeros((1, 2), dtype=pad_left.dtype) padding = tf.concat([padding, z], axis=0) return tf.pad(image, padding) padded_images = tf.map_fn( get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False, back_prop=False) return padded_images
<SYSTEM_TASK:> Construct a linear trajectory from x0. <END_TASK> <USER_TASK:> Description: def _get_linear_trajectory(x0, velocity, t): """Construct a linear trajectory from x0. Args: x0: N-D float tensor. velocity: N-D float tensor t: [sequence_length]-length float tensor Returns: x: [sequence_length, ndims] float tensor. """
x0 = tf.convert_to_tensor(x0) velocity = tf.convert_to_tensor(velocity) t = tf.convert_to_tensor(t) if x0.shape.ndims != 1: raise ValueError("x0 must be a rank 1 tensor") if velocity.shape.ndims != 1: raise ValueError("velocity must be a rank 1 tensor") if t.shape.ndims != 1: raise ValueError("t must be a rank 1 tensor") x0 = tf.expand_dims(x0, axis=0) velocity = tf.expand_dims(velocity, axis=0) dx = velocity * tf.expand_dims(t, axis=-1) linear_trajectories = x0 + dx assert linear_trajectories.shape.ndims == 2, \ "linear_trajectories should be a rank 2 tensor" return linear_trajectories
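A tiny numeric check of the `x0 + velocity * t` step above, written as a standalone TF eager computation with arbitrary values (it reproduces the math rather than calling the function itself):

```python
import tensorflow as tf

x0 = tf.constant([0.0, 0.5])         # starting position
velocity = tf.constant([0.1, -0.2])  # displacement per unit time
t = tf.range(3, dtype=tf.float32)    # t = [0, 1, 2]

trajectory = x0[tf.newaxis] + velocity[tf.newaxis] * t[:, tf.newaxis]
print(trajectory.numpy())
# rows: [0.0, 0.5], [0.1, 0.3], [0.2, 0.1]
```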
<SYSTEM_TASK:> Turn simple static images into sequences of the originals bouncing around. <END_TASK> <USER_TASK:> Description: def image_as_moving_sequence( image, sequence_length=20, output_size=(64, 64), velocity=0.1, start_position=None): """Turn simple static images into sequences of the originals bouncing around. Adapted from Srivastava et al. http://www.cs.toronto.edu/~nitish/unsupervised_video/ Example usage: ```python import tensorflow as tf import tensorflow_datasets as tfds from tensorflow_datasets.video import moving_sequence tf.compat.v1.enable_eager_execution() def animate(sequence): import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation sequence = np.squeeze(sequence, axis=-1) fig = plt.figure() plt.axis("off") ims = [[plt.imshow(im, cmap="gray", animated=True)] for im in sequence] # don't remove `anim =` as the linter may suggest # otherwise the plot will freeze on the last frame anim = animation.ArtistAnimation( fig, ims, interval=50, blit=True, repeat_delay=100) plt.show() plt.close() mnist_ds = tfds.load("mnist", split=tfds.Split.TRAIN, as_supervised=True) mnist_ds = mnist_ds.repeat().shuffle(1024) def map_fn(image, label): sequence = moving_sequence.image_as_moving_sequence( image, sequence_length=20) return sequence.image_sequence moving_mnist_ds = mnist_ds.map(map_fn).batch(2).map( lambda x: dict(image_sequence=tf.reduce_max(x, axis=0))) # # for comparison with test data provided by original authors # moving_mnist_ds = tfds.load("moving_mnist", split=tfds.Split.TEST) for seq in moving_mnist_ds: animate(seq["image_sequence"].numpy()) ``` Args: image: [in_h, in_w, n_channels] tensor defining the sub-image to be bouncing around. sequence_length: int, length of sequence. output_size: (out_h, out_w) size of the returned images. velocity: scalar speed or 2D velocity of image. If scalar, the 2D velocity is randomly generated with this magnitude. This is the normalized distance moved each time step by the sub-image, where normalization occurs over the feasible distance the sub-image can move e.g. if the input image is [10 x 10] and the output image is [60 x 60], a speed of 0.1 means the sub-image moves (60 - 10) * 0.1 = 5 pixels per time step. start_position: 2D float32 normalized initial position of the image in [0, 1]. Randomized uniformly if not given. Returns: `MovingSequence` namedtuple containing: `image_sequence`: [sequence_length, out_h, out_w, n_channels] image at each time step. Padded values are all zero. Same dtype as input image. `trajectory`: [sequence_length, 2] float32 in [0, 1] 2D normalized coordinates of the image at every time step. `start_position`: 2D float32 normalized initial position of the image in [0, 1]. Same as input if provided, otherwise randomly generated. `velocity`: 2D float32 normalized velocity. Same as input velocity if provided as a 2D tensor, otherwise the randomly generated velocity. """
ndims = 2 image = tf.convert_to_tensor(image) if image.shape.ndims != 3: raise ValueError("image must be rank 3, got %s" % str(image)) output_size = tf.TensorShape(output_size) if len(output_size) != ndims: raise ValueError("output_size must have exactly %d elements, got %s" % (ndims, output_size)) image_shape = tf.shape(image) if start_position is None: start_position = tf.random.uniform((ndims,), dtype=tf.float32) elif start_position.shape != (ndims,): raise ValueError("start_position must have shape (%d,)" % ndims) velocity = tf.convert_to_tensor(velocity, dtype=tf.float32) if velocity.shape.ndims == 0: velocity = _get_random_unit_vector(ndims, tf.float32) * velocity elif velocity.shape.ndims != 1: raise ValueError("velocity must be rank 0 or rank 1, got %s" % velocity) t = tf.range(sequence_length, dtype=tf.float32) trajectory = _get_linear_trajectory(start_position, velocity, t) trajectory = _bounce_to_bbox(trajectory) total_padding = output_size - image_shape[:2] if not tf.executing_eagerly(): cond = tf.compat.v1.assert_greater(total_padding, -1) with tf.control_dependencies([cond]): total_padding = tf.identity(total_padding) sequence_pad_lefts = tf.cast( tf.math.round(trajectory * tf.cast(total_padding, tf.float32)), tf.int32) sequence = _create_moving_sequence(image, sequence_pad_lefts, total_padding) sequence.set_shape( [sequence_length] + output_size.as_list() + [image.shape[-1]]) return MovingSequence( image_sequence=sequence, trajectory=trajectory, start_position=start_position, velocity=velocity)
<SYSTEM_TASK:> Returns True if other_version matches. <END_TASK> <USER_TASK:> Description: def match(self, other_version): """Returns True if other_version matches. Args: other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a number or a wildcard. """
major, minor, patch = _str_to_version(other_version, allow_wildcard=True) return (major in [self.major, "*"] and minor in [self.minor, "*"] and patch in [self.patch, "*"])
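A standalone sketch of the wildcard rule implemented by `match` above; `_parse` below is a hypothetical stand-in for `_str_to_version` and only handles fully specified "x.y.z" strings.

```python
def _parse(version_str):
  # Hypothetical helper: "*" components stay strings, digits become ints.
  return tuple(c if c == "*" else int(c) for c in version_str.split("."))

def matches(version_str, pattern_str):
  # A component matches when it equals the pattern component or the pattern is "*".
  return all(p in (v, "*")
             for v, p in zip(_parse(version_str), _parse(pattern_str)))

assert matches("1.2.0", "1.*.*")
assert not matches("1.2.0", "2.0.*")
```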
<SYSTEM_TASK:> Returns labels for validation. <END_TASK> <USER_TASK:> Description: def _get_validation_labels(val_path): """Returns labels for validation. Args: val_path: path to TAR file containing validation images. It is used to retrieve the name of pictures and associate them to labels. Returns: dict, mapping from image name (str) to label (str). """
labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME) with tf.io.gfile.GFile(labels_path) as labels_f: labels = labels_f.read().strip().split('\n') with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj: tar = tarfile.open(mode='r:', fileobj=tar_f_obj) images = sorted(tar.getnames()) return dict(zip(images, labels))
<SYSTEM_TASK:> Whether any of the filenames exist. <END_TASK> <USER_TASK:> Description: def do_files_exist(filenames): """Whether any of the filenames exist."""
preexisting = [tf.io.gfile.exists(f) for f in filenames] return any(preexisting)
<SYSTEM_TASK:> Returns a temporary filename based on filename. <END_TASK> <USER_TASK:> Description: def get_incomplete_path(filename): """Returns a temporary filename based on filename."""
random_suffix = "".join( random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) return filename + ".incomplete" + random_suffix
<SYSTEM_TASK:> Create temporary files for filenames and rename on exit. <END_TASK> <USER_TASK:> Description: def _incomplete_files(filenames): """Create temporary files for filenames and rename on exit."""
tmp_files = [get_incomplete_path(f) for f in filenames] try: yield tmp_files for tmp, output in zip(tmp_files, filenames): tf.io.gfile.rename(tmp, output) finally: for tmp in tmp_files: if tf.io.gfile.exists(tmp): tf.io.gfile.remove(tmp)
<SYSTEM_TASK:> Create temporary dir for dirname and rename on exit. <END_TASK> <USER_TASK:> Description: def incomplete_dir(dirname): """Create temporary dir for dirname and rename on exit."""
tmp_dir = get_incomplete_path(dirname) tf.io.gfile.makedirs(tmp_dir) try: yield tmp_dir tf.io.gfile.rename(tmp_dir, dirname) finally: if tf.io.gfile.exists(tmp_dir): tf.io.gfile.rmtree(tmp_dir)
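A usage sketch for `incomplete_dir` above, assuming it is decorated with `contextlib.contextmanager` (the decorator is not visible in this snippet) and that the hypothetical output path is writable.

```python
import os
import tensorflow as tf

final_dir = "/data/my_dataset/1.0.0"  # hypothetical output directory
with incomplete_dir(final_dir) as tmp_dir:
  # Write into the temporary directory; it is renamed to final_dir only if
  # this block completes, so readers never observe a half-written directory.
  with tf.io.gfile.GFile(os.path.join(tmp_dir, "dataset_info.json"), "w") as f:
    f.write("{}")
```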
<SYSTEM_TASK:> Shuffle a single record file in memory. <END_TASK> <USER_TASK:> Description: def _shuffle_tfrecord(path, random_gen): """Shuffle a single record file in memory."""
# Read all records record_iter = tf.compat.v1.io.tf_record_iterator(path) all_records = [ r for r in utils.tqdm( record_iter, desc="Reading...", unit=" examples", leave=False) ] # Shuffling in memory random_gen.shuffle(all_records) # Write all records back with tf.io.TFRecordWriter(path) as writer: for record in utils.tqdm( all_records, desc="Writing...", unit=" examples", leave=False): writer.write(record)
<SYSTEM_TASK:> Writes generated str records to output_files in round-robin order. <END_TASK> <USER_TASK:> Description: def _write_tfrecords_from_generator(generator, output_files, shuffle=True): """Writes generated str records to output_files in round-robin order."""
if do_files_exist(output_files): raise ValueError( "Pre-processed files already exist: {}.".format(output_files)) with _incomplete_files(output_files) as tmp_files: # Write all shards writers = [tf.io.TFRecordWriter(fname) for fname in tmp_files] with _close_on_exit(writers) as writers: logging.info("Writing TFRecords") _round_robin_write(writers, generator) # Shuffle each shard if shuffle: # WARNING: Using np instead of Python random because Python random # produces different values between Python 2 and 3 and between # architectures random_gen = np.random.RandomState(42) for path in utils.tqdm( tmp_files, desc="Shuffling...", unit=" shard", leave=False): _shuffle_tfrecord(path, random_gen=random_gen)
<SYSTEM_TASK:> Write records from generator round-robin across writers. <END_TASK> <USER_TASK:> Description: def _round_robin_write(writers, generator): """Write records from generator round-robin across writers."""
for i, example in enumerate(utils.tqdm( generator, unit=" examples", leave=False)): writers[i % len(writers)].write(example)
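A toy illustration of the round-robin assignment used by `_round_robin_write`, mirroring its `writers[i % len(writers)]` logic with plain Python lists instead of `tf.io.TFRecordWriter` objects.

```python
class ListWriter(object):
  """Minimal stand-in for a record writer that appends to a list."""

  def __init__(self):
    self.records = []

  def write(self, record):
    self.records.append(record)

writers = [ListWriter() for _ in range(3)]
for i, example in enumerate([b"a", b"b", b"c", b"d", b"e"]):
  writers[i % len(writers)].write(example)

assert [w.records for w in writers] == [[b"a", b"d"], [b"b", b"e"], [b"c"]]
```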
<SYSTEM_TASK:> Wrapper around Tqdm which can be updated in threads. <END_TASK> <USER_TASK:> Description: def _async_tqdm(*args, **kwargs): """Wrapper around Tqdm which can be updated in threads. Usage: ``` with utils.async_tqdm(...) as pbar: # pbar can then be modified inside a thread # pbar.update_total(3) # pbar.update() ``` Args: *args: args of tqdm **kwargs: kwargs of tqdm Yields: pbar: Async pbar which can be shared between threads. """
with tqdm_lib.tqdm(*args, **kwargs) as pbar: pbar = _TqdmPbarAsync(pbar) yield pbar pbar.clear() # pop pbar from the active list of pbar print()
<SYSTEM_TASK:> Copy data read from src file obj to new file in dest_path. <END_TASK> <USER_TASK:> Description: def _copy(src_file, dest_path): """Copy data read from src file obj to new file in dest_path."""
tf.io.gfile.makedirs(os.path.dirname(dest_path)) with tf.io.gfile.GFile(dest_path, 'wb') as dest_file: while True: data = src_file.read(io.DEFAULT_BUFFER_SIZE) if not data: break dest_file.write(data)
<SYSTEM_TASK:> Add a progression bar for the current extraction. <END_TASK> <USER_TASK:> Description: def tqdm(self): """Add a progression bar for the current extraction."""
with utils.async_tqdm( total=0, desc='Extraction completed...', unit=' file') as pbar_path: self._pbar_path = pbar_path yield
<SYSTEM_TASK:> Convert the given value to Feature if necessary. <END_TASK> <USER_TASK:> Description: def to_feature(value): """Convert the given value to Feature if necessary."""
if isinstance(value, FeatureConnector): return value elif utils.is_dtype(value): # tf.int32, tf.string,... return Tensor(shape=(), dtype=tf.as_dtype(value)) elif isinstance(value, dict): return FeaturesDict(value) else: raise ValueError('Feature not supported: {}'.format(value))
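A usage sketch for `to_feature` above, assuming the surrounding feature classes (`Tensor`, `FeaturesDict`) are importable from the same module.

```python
import tensorflow as tf

# A bare dtype becomes a scalar Tensor feature.
scalar_feature = to_feature(tf.int64)

# A dict is promoted to a FeaturesDict, converting its values recursively.
nested_feature = to_feature({
    "id": tf.string,
    "score": tf.float32,
})
```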
<SYSTEM_TASK:> Decode the given feature from the tfexample_dict. <END_TASK> <USER_TASK:> Description: def decode_single_feature_from_dict( feature_k, feature, tfexample_dict): """Decode the given feature from the tfexample_dict. Args: feature_k (str): Feature key in the tfexample_dict feature (FeatureConnector): Connector object to use to decode the field tfexample_dict (dict): Dict containing the data to decode. Returns: decoded_feature: The output of the feature.decode_example """
# Singleton case if not feature.serialized_keys: data_to_decode = tfexample_dict[feature_k] # Feature contains sub features else: # Extract the sub-features from the global feature dict data_to_decode = { k: tfexample_dict[posixpath.join(feature_k, k)] for k in feature.serialized_keys } return feature.decode_example(data_to_decode)
<SYSTEM_TASK:> Ensure the two list of keys matches. <END_TASK> <USER_TASK:> Description: def _assert_keys_match(keys1, keys2): """Ensure the two list of keys matches."""
if set(keys1) != set(keys2): raise ValueError('{} {}'.format(list(keys1), list(keys2)))
<SYSTEM_TASK:> Unpack the celeba config file. <END_TASK> <USER_TASK:> Description: def _process_celeba_config_file(self, file_path): """Unpack the celeba config file. The file starts with the number of lines, and a header. Afterwards, there is a configuration for each file: one per line. Args: file_path: Path to the file with the configuration. Returns: keys: names of the attributes values: map from the file name to the list of attribute values for this file. """
with tf.io.gfile.GFile(file_path) as f: data_raw = f.read() lines = data_raw.split("\n") keys = lines[1].strip().split() values = {} # Go over each line (skip the last one, as it is empty). for line in lines[2:-1]: row_values = line.strip().split() # Each row starts with the 'file_name' and then space-separated values. values[row_values[0]] = [int(v) for v in row_values[1:]] return keys, values
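A worked example (with made-up attribute names and file names) that mirrors the parsing logic above on an in-memory string instead of a real file.

```python
# Hypothetical attribute file with two entries and two attributes.
data_raw = "2\nSmiling Young\n000001.jpg  1 -1\n000002.jpg -1  1\n"
lines = data_raw.split("\n")
keys = lines[1].strip().split()          # header row -> ["Smiling", "Young"]
values = {row.strip().split()[0]: [int(v) for v in row.strip().split()[1:]]
          for row in lines[2:-1]}        # skip count, header and trailing blank

assert keys == ["Smiling", "Young"]
assert values == {"000001.jpg": [1, -1], "000002.jpg": [-1, 1]}
```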
<SYSTEM_TASK:> Generate QuickDraw bitmap examples. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, file_paths): """Generate QuickDraw bitmap examples. Given a list of file paths with data for each class label, generate examples in a random order. Args: file_paths: (dict of {str: str}) the paths to files containing the data, indexed by label. Yields: The QuickDraw examples, as defined in the dataset info features. """
for label, path in sorted(file_paths.items(), key=lambda x: x[0]): with tf.io.gfile.GFile(path, "rb") as f: class_images = np.load(f) for np_image in class_images: yield { "image": np_image.reshape(_QUICKDRAW_IMAGE_SHAPE), "label": label, }
<SYSTEM_TASK:> Attempt to import tensorflow, and ensure its version is sufficient. <END_TASK> <USER_TASK:> Description: def ensure_tf_install(): # pylint: disable=g-statement-before-imports """Attempt to import tensorflow, and ensure its version is sufficient. Raises: ImportError: if either tensorflow is not importable or its version is inadequate. """
try: import tensorflow as tf except ImportError: # Print more informative error message, then reraise. print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not " "installed by default when you install TensorFlow Datasets. This is " "so that users can decide whether to install the GPU-enabled " "TensorFlow package. To use TensorFlow Datasets, please install the " "most recent version of TensorFlow, by following instructions at " "https://tensorflow.org/install.\n\n") raise tf_version = distutils.version.LooseVersion(tf.__version__) v_1_12 = distutils.version.LooseVersion("1.12.0") if tf_version < v_1_12: raise ImportError( "This version of TensorFlow Datasets requires TensorFlow " "version >= {required}; Detected an installation of version {present}. " "Please upgrade TensorFlow to proceed.".format( required="1.12.0", present=tf.__version__)) _patch_tf(tf)
<SYSTEM_TASK:> Patch TF to maintain compatibility across versions. <END_TASK> <USER_TASK:> Description: def _patch_tf(tf): """Patch TF to maintain compatibility across versions."""
global TF_PATCH if TF_PATCH: return v_1_12 = distutils.version.LooseVersion("1.12.0") v_1_13 = distutils.version.LooseVersion("1.13.0") v_2 = distutils.version.LooseVersion("2.0.0") tf_version = distutils.version.LooseVersion(tf.__version__) if v_1_12 <= tf_version < v_1_13: # TODO(b/123930850): remove when 1.13 is stable. TF_PATCH = "tf1_12" _patch_for_tf1_12(tf) elif v_1_13 <= tf_version < v_2: TF_PATCH = "tf1_13" _patch_for_tf1_13(tf) else: TF_PATCH = "tf2" _patch_for_tf2(tf)
<SYSTEM_TASK:> Whether ds is a Dataset. Compatible across TF versions. <END_TASK> <USER_TASK:> Description: def is_dataset(ds): """Whether ds is a Dataset. Compatible across TF versions."""
import tensorflow as tf from tensorflow_datasets.core.utils import py_utils dataset_types = [tf.data.Dataset] v1_ds = py_utils.rgetattr(tf, "compat.v1.data.Dataset", None) v2_ds = py_utils.rgetattr(tf, "compat.v2.data.Dataset", None) if v1_ds is not None: dataset_types.append(v1_ds) if v2_ds is not None: dataset_types.append(v2_ds) return isinstance(ds, tuple(dataset_types))
<SYSTEM_TASK:> Returns SplitGenerators from the folder names. <END_TASK> <USER_TASK:> Description: def _split_generators(self, dl_manager): """Returns SplitGenerators from the folder names."""
# At data creation time, parse the folder to deduce number of splits, # labels, image size. # The splits correspond to the high level folders split_names = list_folders(dl_manager.manual_dir) # Extract all label names and associated images split_label_images = {} # dict[split_name][label_name] = list(img_paths) for split_name in split_names: split_dir = os.path.join(dl_manager.manual_dir, split_name) split_label_images[split_name] = { label_name: list_imgs(os.path.join(split_dir, label_name)) for label_name in list_folders(split_dir) } # Merge all label names from all splits to get the final list of labels # Sorted list for determinism labels = [split.keys() for split in split_label_images.values()] labels = list(sorted(set(itertools.chain(*labels)))) # Could improve the automated encoding format detection # Extract the list of all image paths image_paths = [ image_paths for label_images in split_label_images.values() for image_paths in label_images.values() ] if any(f.lower().endswith(".png") for f in itertools.chain(*image_paths)): encoding_format = "png" else: encoding_format = "jpeg" # Update the info.features. This info will be automatically restored when # the dataset is re-created self.info.features["image"].set_encoding_format(encoding_format) self.info.features["label"].names = labels def num_examples(label_images): return sum(len(imgs) for imgs in label_images.values()) # Define the splits return [ tfds.core.SplitGenerator( name=split_name, # The number of shards is a dynamic function of the total # number of images (between 0-10) num_shards=min(10, max(num_examples(label_images) // 1000, 1)), gen_kwargs=dict(label_images=label_images,), ) for split_name, label_images in split_label_images.items() ]
<SYSTEM_TASK:> Generate example for each image in the dict. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, label_images): """Generate example for each image in the dict."""
for label, image_paths in label_images.items(): for image_path in image_paths: yield { "image": image_path, "label": label, }
<SYSTEM_TASK:> Generate corrupted imagenet validation data. <END_TASK> <USER_TASK:> Description: def _generate_examples_validation(self, archive, labels): """Generate corrupted imagenet validation data. Apply corruptions to the raw images according to self.corruption_type. Args: archive: an iterator for the raw dataset. labels: a dictionary that maps the file names to imagenet labels. Yields: dictionary with the file name, an image file object, and label for each imagenet validation example. """
# Get the current random seeds. numpy_st0 = np.random.get_state() # Set new random seeds. np.random.seed(135) logging.warning('Overwriting cv2 RNG seed.') tfds.core.lazy_imports.cv2.setRNGSeed(357) for example in super(Imagenet2012Corrupted, self)._generate_examples_validation(archive, labels): with tf.Graph().as_default(): tf_img = tf.image.decode_jpeg(example['image'].read(), channels=3) image_np = tfds.as_numpy(tf_img) example['image'] = self._get_corrupted_example(image_np) yield example # Reset the seeds back to their original values. np.random.set_state(numpy_st0)
<SYSTEM_TASK:> Return corrupted images. <END_TASK> <USER_TASK:> Description: def _get_corrupted_example(self, x): """Return corrupted images. Args: x: numpy array, uncorrupted image. Returns: numpy array, corrupted images. """
corruption_type = self.builder_config.corruption_type severity = self.builder_config.severity return { 'gaussian_noise': corruptions.gaussian_noise, 'shot_noise': corruptions.shot_noise, 'impulse_noise': corruptions.impulse_noise, 'defocus_blur': corruptions.defocus_blur, 'frosted_glass_blur': corruptions.frosted_glass_blur, 'zoom_blur': corruptions.zoom_blur, 'fog': corruptions.fog, 'brightness': corruptions.brightness, 'contrast': corruptions.contrast, 'elastic': corruptions.elastic, 'pixelate': corruptions.pixelate, 'jpeg_compression': corruptions.jpeg_compression, }[corruption_type](x, severity)
<SYSTEM_TASK:> Ensure that shape1 matches the pattern given by shape2. <END_TASK> <USER_TASK:> Description: def assert_shape_match(shape1, shape2): """Ensure that shape1 matches the pattern given by shape2. Ex: assert_shape_match((64, 64, 3), (None, None, 3)) Args: shape1 (tuple): Static shape shape2 (tuple): Dynamic shape (can contain None) """
shape1 = tf.TensorShape(shape1) shape2 = tf.TensorShape(shape2) if shape1.ndims is None or shape2.ndims is None: raise ValueError('Shapes must have known rank. Got %s and %s.' % (shape1.ndims, shape2.ndims)) shape1.assert_same_rank(shape2) shape1.assert_is_compatible_with(shape2)
<SYSTEM_TASK:> Create a new graph for the given args. <END_TASK> <USER_TASK:> Description: def _build_graph_run(self, run_args): """Create a new graph for the given args."""
# Could try to use tfe.py_func(fct) but this would require knowing # information about the signature of the function. # Create a new graph: with tf.Graph().as_default() as g: # Create placeholder input_ = run_args.input placeholder = tf.compat.v1.placeholder( dtype=input_.dtype, shape=input_.shape) output = run_args.fct(placeholder) return GraphRun( session=raw_nogpu_session(g), graph=g, placeholder=placeholder, output=output, )
<SYSTEM_TASK:> Converts the given video into a dict convertible to tf example. <END_TASK> <USER_TASK:> Description: def encode_example(self, video_or_path_or_fobj): """Converts the given video into a dict convertible to tf example."""
if isinstance(video_or_path_or_fobj, six.string_types): if not os.path.isfile(video_or_path_or_fobj): _, video_temp_path = tempfile.mkstemp() try: tf.gfile.Copy(video_or_path_or_fobj, video_temp_path, overwrite=True) encoded_video = self._ffmpeg_decode(video_temp_path) finally: os.unlink(video_temp_path) else: encoded_video = self._ffmpeg_decode(video_or_path_or_fobj) elif hasattr(video_or_path_or_fobj, 'read'): encoded_video = self._ffmpeg_decode(video_or_path_or_fobj) else: encoded_video = video_or_path_or_fobj return super(Video, self).encode_example(encoded_video)
<SYSTEM_TASK:> Generate rock, paper or scissors images and labels given the archive of images. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, archive): """Generate rock, paper or scissors images and labels given the archive of images. Args: archive: object that iterates over the zip. Yields: The image and its corresponding label. """
for fname, fobj in archive: res = _NAME_RE.match(fname) if not res: # if anything other than .png; skip continue label = res.group(2).lower() yield { "image": fobj, "label": label, }
<SYSTEM_TASK:> Generate features and target given the CSV file path. <END_TASK> <USER_TASK:> Description: def _generate_examples(self, file_path): """Generate features and target given the CSV file path. Args: file_path: path where the CSV file is stored Yields: The features and the target """
with tf.io.gfile.GFile(file_path) as f: raw_data = csv.DictReader(f) for row in raw_data: survive_val = row.pop("survived") yield { "survived": convert_to_label(survive_val, _SURVIVED_DICT), "features": { name: FEATURE_DICT[name][1](value) for name, value in row.items() } }
<SYSTEM_TASK:> Prepare reserved tokens and a regex for splitting them out of strings. <END_TASK> <USER_TASK:> Description: def _prepare_reserved_tokens(reserved_tokens): """Prepare reserved tokens and a regex for splitting them out of strings."""
reserved_tokens = [tf.compat.as_text(tok) for tok in reserved_tokens or []] dups = _find_duplicates(reserved_tokens) if dups: raise ValueError("Duplicates found in tokens: %s" % dups) reserved_tokens_re = _make_reserved_tokens_re(reserved_tokens) return reserved_tokens, reserved_tokens_re
<SYSTEM_TASK:> Constructs compiled regex to parse out reserved tokens. <END_TASK> <USER_TASK:> Description: def _make_reserved_tokens_re(reserved_tokens): """Constructs compiled regex to parse out reserved tokens."""
if not reserved_tokens: return None escaped_tokens = [_re_escape(rt) for rt in reserved_tokens] pattern = "(%s)" % "|".join(escaped_tokens) reserved_tokens_re = _re_compile(pattern) return reserved_tokens_re
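A standalone sketch of the splitting behaviour the compiled pattern enables, using the standard `re` module directly instead of the `_re_escape`/`_re_compile` helpers, which are not shown here.

```python
import re

reserved_tokens = ["<EOS>", "<UNK>"]
pattern = "(%s)" % "|".join(re.escape(tok) for tok in reserved_tokens)
reserved_tokens_re = re.compile(pattern)

# The capturing group keeps the reserved tokens in the split output.
assert reserved_tokens_re.split("hello<EOS>world") == ["hello", "<EOS>", "world"]
```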
<SYSTEM_TASK:> Writes lines to file prepended by header and metadata. <END_TASK> <USER_TASK:> Description: def write_lines_to_file(cls_name, filename, lines, metadata_dict): """Writes lines to file prepended by header and metadata."""
metadata_dict = metadata_dict or {} header_line = "%s%s" % (_HEADER_PREFIX, cls_name) metadata_line = "%s%s" % (_METADATA_PREFIX, json.dumps(metadata_dict, sort_keys=True)) with tf.io.gfile.GFile(filename, "wb") as f: for line in [header_line, metadata_line]: f.write(tf.compat.as_bytes(line)) f.write(tf.compat.as_bytes("\n")) if lines: f.write(tf.compat.as_bytes("\n".join(lines))) f.write(tf.compat.as_bytes("\n"))
<SYSTEM_TASK:> Read lines from file, parsing out header and metadata. <END_TASK> <USER_TASK:> Description: def read_lines_from_file(cls_name, filename): """Read lines from file, parsing out header and metadata."""
with tf.io.gfile.GFile(filename, "rb") as f: lines = [tf.compat.as_text(line)[:-1] for line in f] header_line = "%s%s" % (_HEADER_PREFIX, cls_name) if lines[0] != header_line: raise ValueError("File {fname} does not seem to have been created from " "{name}.save_to_file.".format( fname=filename, name=cls_name)) metadata_dict = json.loads(lines[1][len(_METADATA_PREFIX):]) return lines[2:], metadata_dict
<SYSTEM_TASK:> Splits a string into tokens. <END_TASK> <USER_TASK:> Description: def tokenize(self, s): """Splits a string into tokens."""
s = tf.compat.as_text(s) if self.reserved_tokens: # First split out the reserved tokens substrs = self._reserved_tokens_re.split(s) else: substrs = [s] toks = [] for substr in substrs: if substr in self.reserved_tokens: toks.append(substr) else: toks.extend(self._alphanum_re.split(substr)) # Filter out empty strings toks = [t for t in toks if t] return toks
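A usage sketch for `tokenize`; the constructor signature and the default alphanumeric splitting are assumptions here, so the expected output in the comment is approximate.

```python
tokenizer = Tokenizer(reserved_tokens=["<EOS>"])  # assumed constructor
tokens = tokenizer.tokenize("Hello, world!<EOS>")
# With an alphanumeric-only split, roughly: ["Hello", "world", "<EOS>"] --
# reserved tokens are kept whole, the rest is split on non-alphanumerics.
```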
<SYSTEM_TASK:> Return the mapping shard_id=>num_examples, assuming round-robin. <END_TASK> <USER_TASK:> Description: def get_shard_id2num_examples(num_shards, total_num_examples): """Return the mapping shard_id=>num_examples, assuming round-robin."""
# TODO(b/130353071): This has the strong assumption that the shards have # been written in a round-robin fashion. This assumption does not hold, for # instance, with Beam generation. The mapping shard_id=>num_examples # should be computed during generation. # Minimum number of examples per shard num_example_in_shard = total_num_examples // num_shards shard_id2num_examples = [num_example_in_shard for _ in range(num_shards)] # If there are remaining examples, we add them to the first shards for shard_id in range(total_num_examples % num_shards): shard_id2num_examples[shard_id] += 1 return shard_id2num_examples
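A worked example of the round-robin shard sizing computed above, assuming the function is in scope.

```python
assert get_shard_id2num_examples(num_shards=3, total_num_examples=10) == [4, 3, 3]
# 10 // 3 = 3 examples per shard, and the single leftover example (10 % 3 = 1)
# goes to the first shard.
```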
<SYSTEM_TASK:> Return the list of offsets associated with each shards. <END_TASK> <USER_TASK:> Description: def compute_mask_offsets(shard_id2num_examples): """Return the list of offsets associated with each shards. Args: shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples Returns: mask_offsets: `list[int]`, offset to skip for each of the shard """
total_num_examples = sum(shard_id2num_examples) mask_offsets = [] total_num_examples = 0 for num_examples_in_shard in shard_id2num_examples: # The offset (nb of examples to skip in the next shard) corresponds to the # number of examples remaining in the current shard mask_offsets.append(total_num_examples % 100) total_num_examples += num_examples_in_shard return mask_offsets
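Continuing the shard-size example above, a check of the offsets produced for those shard sizes.

```python
assert compute_mask_offsets([4, 3, 3]) == [0, 4, 7]
# Each offset is the number of examples already emitted by earlier shards,
# taken modulo 100, matching the `% 100` in the implementation above.
```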
<SYSTEM_TASK:> Check that the two split dicts have the same names and num_shards. <END_TASK> <USER_TASK:> Description: def check_splits_equals(splits1, splits2): """Check that the two split dicts have the same names and num_shards."""
if set(splits1) ^ set(splits2): # Name intersection should be null return False for _, (split1, split2) in utils.zip_dict(splits1, splits2): if split1.num_shards != split2.num_shards: return False return True
<SYSTEM_TASK:> Returns a new SplitDict initialized from the `repeated_split_infos`. <END_TASK> <USER_TASK:> Description: def from_proto(cls, repeated_split_infos): """Returns a new SplitDict initialized from the `repeated_split_infos`."""
split_dict = cls() for split_info_proto in repeated_split_infos: split_info = SplitInfo() split_info.CopyFrom(split_info_proto) split_dict.add(split_info) return split_dict