repo_id (string) | file_path (string) | content (string) | __index_level_0__ (int64)
---|---|---|---|
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_data_files.py | import os
from pathlib import Path, PurePath
from typing import List
from unittest.mock import patch
import fsspec
import pytest
from fsspec.spec import AbstractFileSystem
from datasets.data_files import (
DataFilesDict,
DataFilesList,
_get_data_files_patterns,
_get_metadata_files_patterns,
_is_inside_unrequested_special_dir,
_is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir,
resolve_pattern,
)
from datasets.fingerprint import Hasher
_TEST_PATTERNS = ["*", "**", "**/*", "*.txt", "data/*", "**/*.txt", "**/train.txt"]
_FILES_TO_IGNORE = {".dummy", "README.md", "dummy_data.zip", "dataset_infos.json"}
_DIRS_TO_IGNORE = {"data/.dummy_subdir", "__pycache__"}
_TEST_PATTERNS_SIZES = {
"*": 0,
"**": 4,
"**/*": 4,
"*.txt": 0,
"data/*": 2,
"data/**": 4,
"**/*.txt": 4,
"**/train.txt": 2,
}
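# For orientation, these counts follow from the `complex_data_dir` fixture below: the four matching
# data files are data/train.txt, data/test.txt, data/subdir/train.txt and data/subdir/test.txt.
# "*" and "*.txt" resolve to 0 matches because the only top-level entries (README.md, .dummy and the
# data/ and __pycache__/ directories) are either in the ignore lists or not files.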
_TEST_URL = "https://raw.githubusercontent.com/huggingface/datasets/9675a5a1e7b99a86f9c250f6ea5fa5d1e6d5cc7d/setup.py"
@pytest.fixture
def complex_data_dir(tmp_path):
data_dir = tmp_path / "complex_data_dir"
data_dir.mkdir()
(data_dir / "data").mkdir()
with open(data_dir / "data" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "data" / "test.txt", "w") as f:
f.write("bar\n" * 10)
with open(data_dir / "README.md", "w") as f:
f.write("This is a readme")
with open(data_dir / ".dummy", "w") as f:
f.write("this is a dummy file that is not a data file")
(data_dir / "data" / "subdir").mkdir()
with open(data_dir / "data" / "subdir" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "data" / "subdir" / "test.txt", "w") as f:
f.write("bar\n" * 10)
(data_dir / "data" / ".dummy_subdir").mkdir()
with open(data_dir / "data" / ".dummy_subdir" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "data" / ".dummy_subdir" / "test.txt", "w") as f:
f.write("bar\n" * 10)
(data_dir / "__pycache__").mkdir()
with open(data_dir / "__pycache__" / "script.py", "w") as f:
f.write("foo\n" * 10)
return str(data_dir)
def is_relative_to(path, *other):
# A built-in method in Python 3.9+
try:
path.relative_to(*other)
return True
except ValueError:
return False
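# (For reference, on Python 3.9+ the built-in can be used directly, e.g.
# Path("/tmp/data/train.txt").is_relative_to("/tmp/data") returns True; the paths in this
# example are purely illustrative.)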
@pytest.fixture
def pattern_results(complex_data_dir):
# We use fsspec glob as the reference for resolving data files from patterns.
# This is the same behavior as in dask, for example. (A small illustrative sketch follows this fixture.)
#
# /!\ Here are some behaviors specific to fsspec glob that are different from glob.glob, Path.glob, Path.match or fnmatch:
# - '*' matches only first level items
# - '**' matches all items
# - '**/*' matches all items that are at least one level deep
#
# More generally:
# - '*' matches any character except a forward-slash (to match just the file or directory name)
# - '**' matches any character including a forward-slash /
return {
pattern: sorted(
Path(os.path.abspath(path)).as_posix()
for path in fsspec.filesystem("file").glob(os.path.join(complex_data_dir, pattern))
if Path(path).name not in _FILES_TO_IGNORE
and not any(
is_relative_to(Path(path), os.path.join(complex_data_dir, dir_path)) for dir_path in _DIRS_TO_IGNORE
)
and Path(path).is_file()
)
for pattern in _TEST_PATTERNS
}
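# A minimal sketch (not part of the test suite) of the fsspec glob behaviors described in the
# `pattern_results` fixture above. The directory layout named in the comments is hypothetical and
# exact results can vary slightly across fsspec versions, so treat this as indicative only.
def _demo_fsspec_glob(tmp_dir: str):
    # tmp_dir is assumed to contain: a.txt, data/train.txt and data/subdir/train.txt
    fs = fsspec.filesystem("file")
    first_level = fs.glob(os.path.join(tmp_dir, "*"))  # only first-level items (a.txt and the data directory)
    all_items = fs.glob(os.path.join(tmp_dir, "**"))  # items at any depth
    nested_items = fs.glob(os.path.join(tmp_dir, "**/*"))  # items that are at least one level deep
    return first_level, all_items, nested_items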
@pytest.fixture
def hub_dataset_repo_path(tmpfs, complex_data_dir):
for path in Path(complex_data_dir).rglob("*"):
if path.is_file():
with tmpfs.open(path.relative_to(complex_data_dir).as_posix(), "wb") as f:
f.write(path.read_bytes())
yield "tmp://"
@pytest.fixture
def hub_dataset_repo_patterns_results(hub_dataset_repo_path, complex_data_dir, pattern_results):
return {
pattern: [
hub_dataset_repo_path + Path(path).relative_to(complex_data_dir).as_posix()
for path in pattern_results[pattern]
]
for pattern in pattern_results
}
def test_is_inside_unrequested_special_dir(complex_data_dir, pattern_results):
# usual patterns outside special dir work fine
for pattern, result in pattern_results.items():
if result:
matched_rel_path = str(Path(result[0]).relative_to(complex_data_dir))
assert _is_inside_unrequested_special_dir(matched_rel_path, pattern) is False
# check behavior for special dir
f = _is_inside_unrequested_special_dir
assert f("__pycache__/b.txt", "**") is True
assert f("__pycache__/b.txt", "*/b.txt") is True
assert f("__pycache__/b.txt", "__pycache__/*") is False
assert f("__pycache__/__b.txt", "__pycache__/*") is False
assert f("__pycache__/__b.txt", "__*/*") is False
assert f("__b.txt", "*") is False
def test_is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(complex_data_dir, pattern_results):
# usual patterns outside hidden dir work fine
for pattern, result in pattern_results.items():
if result:
matched_rel_path = str(Path(result[0]).relative_to(complex_data_dir))
assert _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path, pattern) is False
# check behavior for hidden dir and file
f = _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir
assert f(".hidden_file.txt", "**") is True
assert f(".hidden_file.txt", ".*") is False
assert f(".hidden_dir/a.txt", "**") is True
assert f(".hidden_dir/a.txt", ".*/*") is False
assert f(".hidden_dir/a.txt", ".hidden_dir/*") is False
assert f(".hidden_dir/.hidden_file.txt", "**") is True
assert f(".hidden_dir/.hidden_file.txt", ".*/*") is True
assert f(".hidden_dir/.hidden_file.txt", ".*/.*") is False
assert f(".hidden_dir/.hidden_file.txt", ".hidden_dir/*") is True
assert f(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*") is False
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_pattern_results_fixture(pattern_results, pattern):
assert len(pattern_results[pattern]) == _TEST_PATTERNS_SIZES[pattern]
assert all(Path(path).is_file() for path in pattern_results[pattern])
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_resolve_pattern_locally(complex_data_dir, pattern, pattern_results):
try:
resolved_data_files = resolve_pattern(pattern, complex_data_dir)
assert sorted(str(f) for f in resolved_data_files) == pattern_results[pattern]
except FileNotFoundError:
assert len(pattern_results[pattern]) == 0
def test_resolve_pattern_locally_with_dot_in_base_path(complex_data_dir):
base_path_with_dot = os.path.join(complex_data_dir, "data", ".dummy_subdir")
resolved_data_files = resolve_pattern(os.path.join(base_path_with_dot, "train.txt"), base_path_with_dot)
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_with_absolute_path(tmp_path, complex_data_dir):
abs_path = os.path.join(complex_data_dir, "data", "train.txt")
resolved_data_files = resolve_pattern(abs_path, str(tmp_path / "blabla"))
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_with_double_dots(tmp_path, complex_data_dir):
path_with_double_dots = os.path.join(complex_data_dir, "data", "subdir", "..", "train.txt")
resolved_data_files = resolve_pattern(path_with_double_dots, str(tmp_path / "blabla"))
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_returns_hidden_file_only_if_requested(complex_data_dir):
with pytest.raises(FileNotFoundError):
resolve_pattern("*dummy", complex_data_dir)
resolved_data_files = resolve_pattern(".dummy", complex_data_dir)
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_hidden_base_path(tmp_path):
hidden = tmp_path / ".test_hidden_base_path"
hidden.mkdir()
(tmp_path / ".test_hidden_base_path" / "a.txt").touch()
resolved_data_files = resolve_pattern("*", str(hidden))
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_returns_hidden_dir_only_if_requested(complex_data_dir):
with pytest.raises(FileNotFoundError):
resolve_pattern("data/*dummy_subdir/train.txt", complex_data_dir)
resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", complex_data_dir)
assert len(resolved_data_files) == 1
resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", complex_data_dir)
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_returns_special_dir_only_if_requested(complex_data_dir):
with pytest.raises(FileNotFoundError):
resolve_pattern("data/*dummy_subdir/train.txt", complex_data_dir)
resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", complex_data_dir)
assert len(resolved_data_files) == 1
resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", complex_data_dir)
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_special_base_path(tmp_path):
special = tmp_path / "__test_special_base_path__"
special.mkdir()
(tmp_path / "__test_special_base_path__" / "a.txt").touch()
resolved_data_files = resolve_pattern("*", str(special))
assert len(resolved_data_files) == 1
@pytest.mark.parametrize("pattern,size,extensions", [("**", 4, [".txt"]), ("**", 4, None), ("**", 0, [".blablabla"])])
def test_resolve_pattern_locally_with_extensions(complex_data_dir, pattern, size, extensions):
if size > 0:
resolved_data_files = resolve_pattern(pattern, complex_data_dir, allowed_extensions=extensions)
assert len(resolved_data_files) == size
else:
with pytest.raises(FileNotFoundError):
resolve_pattern(pattern, complex_data_dir, allowed_extensions=extensions)
def test_fail_resolve_pattern_locally(complex_data_dir):
with pytest.raises(FileNotFoundError):
resolve_pattern("blablabla", complex_data_dir)
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support symlinks in the default mode")
def test_resolve_pattern_locally_does_not_resolve_symbolic_links(tmp_path, complex_data_dir):
(tmp_path / "train_data_symlink.txt").symlink_to(os.path.join(complex_data_dir, "data", "train.txt"))
resolved_data_files = resolve_pattern("train_data_symlink.txt", str(tmp_path))
assert len(resolved_data_files) == 1
assert Path(resolved_data_files[0]) == tmp_path / "train_data_symlink.txt"
def test_resolve_pattern_locally_sorted_files(tmp_path_factory):
path = str(tmp_path_factory.mktemp("unsorted_text_files"))
unsorted_names = ["0.txt", "2.txt", "3.txt"]
for name in unsorted_names:
with open(os.path.join(path, name), "w"):
pass
resolved_data_files = resolve_pattern("*", path)
resolved_names = [os.path.basename(data_file) for data_file in resolved_data_files]
assert resolved_names == sorted(unsorted_names)
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_resolve_pattern_in_dataset_repository(hub_dataset_repo_path, pattern, hub_dataset_repo_patterns_results):
try:
resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path)
assert sorted(str(f) for f in resolved_data_files) == hub_dataset_repo_patterns_results[pattern]
except FileNotFoundError:
assert len(hub_dataset_repo_patterns_results[pattern]) == 0
@pytest.mark.parametrize(
"pattern,size,base_path", [("**", 4, None), ("**", 4, "data"), ("**", 2, "data/subdir"), ("**", 0, "data/subdir2")]
)
def test_resolve_pattern_in_dataset_repository_with_base_path(hub_dataset_repo_path, pattern, size, base_path):
base_path = hub_dataset_repo_path + (base_path or "")
if size > 0:
resolved_data_files = resolve_pattern(pattern, base_path)
assert len(resolved_data_files) == size
else:
with pytest.raises(FileNotFoundError):
resolve_pattern(pattern, base_path)
@pytest.mark.parametrize("pattern,size,extensions", [("**", 4, [".txt"]), ("**", 4, None), ("**", 0, [".blablabla"])])
def test_resolve_pattern_in_dataset_repository_with_extensions(hub_dataset_repo_path, pattern, size, extensions):
if size > 0:
resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path, allowed_extensions=extensions)
assert len(resolved_data_files) == size
else:
with pytest.raises(FileNotFoundError):
resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path, allowed_extensions=extensions)
def test_fail_resolve_pattern_in_dataset_repository(hub_dataset_repo_path):
with pytest.raises(FileNotFoundError):
resolve_pattern("blablabla", hub_dataset_repo_path)
def test_resolve_pattern_in_dataset_repository_returns_hidden_file_only_if_requested(hub_dataset_repo_path):
with pytest.raises(FileNotFoundError):
resolve_pattern("*dummy", hub_dataset_repo_path)
resolved_data_files = resolve_pattern(".dummy", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
def test_resolve_pattern_in_dataset_repository_hidden_base_path(tmpfs):
tmpfs.touch(".hidden/a.txt")
resolved_data_files = resolve_pattern("*", base_path="tmp://.hidden")
assert len(resolved_data_files) == 1
def test_resolve_pattern_in_dataset_repository_returns_hidden_dir_only_if_requested(hub_dataset_repo_path):
with pytest.raises(FileNotFoundError):
resolve_pattern("data/*dummy_subdir/train.txt", hub_dataset_repo_path)
resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
def test_resolve_pattern_in_dataset_repository_returns_special_dir_only_if_requested(hub_dataset_repo_path):
with pytest.raises(FileNotFoundError):
resolve_pattern("data/*dummy_subdir/train.txt", hub_dataset_repo_path)
resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
def test_resolve_pattern_in_dataset_repository_special_base_path(tmpfs):
tmpfs.touch("__special__/a.txt")
resolved_data_files = resolve_pattern("*", base_path="tmp://__special__")
assert len(resolved_data_files) == 1
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_DataFilesList_from_patterns_in_dataset_repository_(
hub_dataset_repo_path, hub_dataset_repo_patterns_results, pattern
):
try:
data_files_list = DataFilesList.from_patterns([pattern], hub_dataset_repo_path)
assert sorted(data_files_list) == hub_dataset_repo_patterns_results[pattern]
assert len(data_files_list.origin_metadata) == len(data_files_list)
except FileNotFoundError:
assert len(hub_dataset_repo_patterns_results[pattern]) == 0
def test_DataFilesList_from_patterns_locally_with_extra_files(complex_data_dir, text_file):
data_files_list = DataFilesList.from_patterns([_TEST_URL, text_file.as_posix()], complex_data_dir)
assert list(data_files_list) == [_TEST_URL, text_file.as_posix()]
assert len(data_files_list.origin_metadata) == 2
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_DataFilesDict_from_patterns_in_dataset_repository(
hub_dataset_repo_path, hub_dataset_repo_patterns_results, pattern
):
split_name = "train"
try:
data_files = DataFilesDict.from_patterns({split_name: [pattern]}, hub_dataset_repo_path)
assert all(isinstance(data_files_list, DataFilesList) for data_files_list in data_files.values())
assert sorted(data_files[split_name]) == hub_dataset_repo_patterns_results[pattern]
except FileNotFoundError:
assert len(hub_dataset_repo_patterns_results[pattern]) == 0
@pytest.mark.parametrize(
"pattern,size,base_path,split_name",
[
("**", 4, None, "train"),
("**", 4, "data", "train"),
("**", 2, "data/subdir", "train"),
("**train*", 1, "data/subdir", "train"),
("**test*", 1, "data/subdir", "test"),
("**", 0, "data/subdir2", "train"),
],
)
def test_DataFilesDict_from_patterns_in_dataset_repository_with_base_path(
hub_dataset_repo_path, pattern, size, base_path, split_name
):
base_path = hub_dataset_repo_path + (base_path or "")
if size > 0:
data_files = DataFilesDict.from_patterns({split_name: [pattern]}, base_path=base_path)
assert len(data_files[split_name]) == size
else:
with pytest.raises(FileNotFoundError):
resolve_pattern(pattern, base_path)
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_DataFilesDict_from_patterns_locally(complex_data_dir, pattern_results, pattern):
split_name = "train"
try:
data_files = DataFilesDict.from_patterns({split_name: [pattern]}, complex_data_dir)
assert all(isinstance(data_files_list, DataFilesList) for data_files_list in data_files.values())
assert sorted(data_files[split_name]) == pattern_results[pattern]
except FileNotFoundError:
assert len(pattern_results[pattern]) == 0
def test_DataFilesDict_from_patterns_in_dataset_repository_hashing(hub_dataset_repo_path):
patterns = {"train": ["**/train.txt"], "test": ["**/test.txt"]}
data_files1 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path)
data_files2 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path)
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
data_files2 = DataFilesDict(sorted(data_files1.items(), reverse=True))
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
patterns2 = {"train": ["data/**train.txt"], "test": ["data/**test.txt"]}
data_files2 = DataFilesDict.from_patterns(patterns2, hub_dataset_repo_path)
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
patterns2 = {"train": ["data/**train.txt"], "test": ["data/**train.txt"]}
data_files2 = DataFilesDict.from_patterns(patterns2, hub_dataset_repo_path)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
# the tmpfs used to mock the hub repo is based on a local directory
# therefore os.stat is used to get the mtime of the data files
with patch("os.stat", return_value=os.stat(__file__)):
data_files2 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
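# (In other words, the fingerprint also covers the files' origin metadata, so changing a file's
#  mtime changes the hash even when the patterns and resolved paths are identical.)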
def test_DataFilesDict_from_patterns_locally_or_remote_hashing(text_file):
patterns = {"train": [_TEST_URL], "test": [str(text_file)]}
data_files1 = DataFilesDict.from_patterns(patterns)
data_files2 = DataFilesDict.from_patterns(patterns)
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
data_files2 = DataFilesDict(sorted(data_files1.items(), reverse=True))
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
patterns2 = {"train": [_TEST_URL], "test": [_TEST_URL]}
data_files2 = DataFilesDict.from_patterns(patterns2)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
with patch("fsspec.implementations.http._file_info", return_value={}):
data_files2 = DataFilesDict.from_patterns(patterns)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
with patch("os.stat", return_value=os.stat(__file__)):
data_files2 = DataFilesDict.from_patterns(patterns)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
def mock_fs(file_paths: List[str]):
"""
Set up a mock filesystem for fsspec containing the provided files
Example:
```py
>>> fs = mock_fs(["data/train.txt", "data.test.txt"])
>>> assert fsspec.get_filesystem_class("mock").__name__ == "DummyTestFS"
>>> assert type(fs).__name__ == "DummyTestFS"
>>> print(fs.glob("**"))
["data", "data/train.txt", "data.test.txt"]
```
"""
dir_paths = {file_path.rsplit("/")[0] for file_path in file_paths if "/" in file_path}
fs_contents = [{"name": dir_path, "type": "directory"} for dir_path in dir_paths] + [
{"name": file_path, "type": "file", "size": 10} for file_path in file_paths
]
class DummyTestFS(AbstractFileSystem):
protocol = "mock"
_fs_contents = fs_contents
def ls(self, path, detail=True, refresh=True, **kwargs):
if kwargs.pop("strip_proto", True):
path = self._strip_protocol(path)
files = not refresh and self._ls_from_cache(path)
if not files:
files = [file for file in self._fs_contents if path == self._parent(file["name"])]
files.sort(key=lambda file: file["name"])
self.dircache[path.rstrip("/")] = files
if detail:
return files
return [file["name"] for file in files]
return DummyTestFS()
@pytest.mark.parametrize(
"data_file_per_split",
[
# === Main cases ===
# file named after split at the root
{"train": "train.txt", "validation": "valid.txt", "test": "test.txt"},
# file named after split in a directory
{
"train": "data/train.txt",
"validation": "data/valid.txt",
"test": "data/test.txt",
},
# directory named after split
{
"train": "train/split.txt",
"validation": "valid/split.txt",
"test": "test/split.txt",
},
# sharded splits
{
"train": [f"data/train_{i}.txt" for i in range(3)],
"validation": [f"data/validation_{i}.txt" for i in range(3)],
"test": [f"data/test_{i}.txt" for i in range(3)],
},
# sharded splits with standard format (+ custom split name)
{
"train": [f"data/train-0000{i}-of-00003.txt" for i in range(3)],
"validation": [f"data/validation-0000{i}-of-00003.txt" for i in range(3)],
"test": [f"data/test-0000{i}-of-00003.txt" for i in range(3)],
"random": [f"data/random-0000{i}-of-00003.txt" for i in range(3)],
},
# === Secondary cases ===
# Default to train split
{"train": "dataset.txt"},
{"train": "data/dataset.txt"},
{"train": ["data/image.jpg", "metadata.jsonl"]},
{"train": ["data/image.jpg", "metadata.csv"]},
# With prefix or suffix in directory or file names
{"train": "my_train_dir/dataset.txt"},
{"train": "data/my_train_file.txt"},
{"test": "my_test_dir/dataset.txt"},
{"test": "data/my_test_file.txt"},
{"validation": "my_validation_dir/dataset.txt"},
{"validation": "data/my_validation_file.txt"},
# With test<>eval aliases
{"test": "eval.txt"},
{"test": "data/eval.txt"},
{"test": "eval/dataset.txt"},
# With valid<>dev aliases
{"validation": "dev.txt"},
{"validation": "data/dev.txt"},
{"validation": "dev/dataset.txt"},
# With valid<>val aliases
{"validation": "val.txt"},
{"validation": "data/val.txt"},
# With other extensions
{"train": "train.parquet", "validation": "valid.parquet", "test": "test.parquet"},
# With "dev" or "eval" without separators
{"train": "developers_list.txt"},
{"train": "data/seqeval_results.txt"},
{"train": "contest.txt"},
# With supported separators
{"test": "my.test.file.txt"},
{"test": "my-test-file.txt"},
{"test": "my_test_file.txt"},
{"test": "my test file.txt"},
{"test": "test00001.txt"},
],
)
def test_get_data_files_patterns(data_file_per_split):
data_file_per_split = {k: v if isinstance(v, list) else [v] for k, v in data_file_per_split.items()}
file_paths = [file_path for split_file_paths in data_file_per_split.values() for file_path in split_file_paths]
fs = mock_fs(file_paths)
def resolver(pattern):
return [file_path for file_path in fs.glob(pattern) if fs.isfile(file_path)]
patterns_per_split = _get_data_files_patterns(resolver)
assert list(patterns_per_split.keys()) == list(data_file_per_split.keys()) # Test split order with list()
for split, patterns in patterns_per_split.items():
matched = [file_path for pattern in patterns for file_path in resolver(pattern)]
assert matched == data_file_per_split[split]
@pytest.mark.parametrize(
"metadata_files",
[
# metadata files at the root
["metadata.jsonl"],
["metadata.csv"],
# nested metadata files
["data/metadata.jsonl", "data/train/metadata.jsonl"],
["data/metadata.csv", "data/train/metadata.csv"],
],
)
def test_get_metadata_files_patterns(metadata_files):
def resolver(pattern):
return [PurePath(path) for path in set(metadata_files) if PurePath(path).match(pattern)]
patterns = _get_metadata_files_patterns(resolver)
matched = [path for path in metadata_files for pattern in patterns if PurePath(path).match(pattern)]
# Use a set to remove the difference in behavior between PurePath.match and matching via fsspec.glob
assert len(set(matched)) == len(metadata_files)
assert sorted(set(matched)) == sorted(metadata_files)
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_arrow_reader.py | import os
import tempfile
from pathlib import Path
from unittest import TestCase
import pyarrow as pa
import pytest
from datasets.arrow_dataset import Dataset
from datasets.arrow_reader import ArrowReader, BaseReader, FileInstructions, ReadInstruction, make_file_instructions
from datasets.info import DatasetInfo
from datasets.splits import NamedSplit, Split, SplitDict, SplitInfo
from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
class ReaderTest(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This reader is made for testing. It mocks file reads.
"""
def _get_table_from_filename(self, filename_skip_take, in_memory=False):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
open(os.path.join(filename), "wb").close()
pa_table = pa.Table.from_pydict({"filename": [Path(filename).name] * 100})
if take == -1:
take = len(pa_table) - skip
if skip is not None and take is not None:
pa_table = pa_table.slice(skip, take)
return pa_table
class BaseReaderTest(TestCase):
def test_read(self):
name = "my_name"
train_info = SplitInfo(name="train", num_examples=100)
test_info = SplitInfo(name="test", num_examples=100)
split_infos = [train_info, test_info]
split_dict = SplitDict()
split_dict.add(train_info)
split_dict.add(test_info)
info = DatasetInfo(splits=split_dict)
with tempfile.TemporaryDirectory() as tmp_dir:
reader = ReaderTest(tmp_dir, info)
instructions = "test[:33%]"
dset = Dataset(**reader.read(name, instructions, split_infos))
self.assertEqual(dset["filename"][0], f"{name}-test")
self.assertEqual(dset.num_rows, 33)
self.assertEqual(dset.num_columns, 1)
instructions1 = ["train", "test[:33%]"]
instructions2 = [Split.TRAIN, ReadInstruction.from_spec("test[:33%]")]
for instructions in [instructions1, instructions2]:
datasets_kwargs = [reader.read(name, instr, split_infos) for instr in instructions]
train_dset, test_dset = (Dataset(**dataset_kwargs) for dataset_kwargs in datasets_kwargs)
self.assertEqual(train_dset["filename"][0], f"{name}-train")
self.assertEqual(train_dset.num_rows, 100)
self.assertEqual(train_dset.num_columns, 1)
self.assertIsInstance(train_dset.split, NamedSplit)
self.assertEqual(str(train_dset.split), "train")
self.assertEqual(test_dset["filename"][0], f"{name}-test")
self.assertEqual(test_dset.num_rows, 33)
self.assertEqual(test_dset.num_columns, 1)
self.assertIsInstance(test_dset.split, NamedSplit)
self.assertEqual(str(test_dset.split), "test[:33%]")
del train_dset, test_dset
def test_read_sharded(self):
name = "my_name"
train_info = SplitInfo(name="train", num_examples=1000, shard_lengths=[100] * 10)
split_infos = [train_info]
split_dict = SplitDict()
split_dict.add(train_info)
info = DatasetInfo(splits=split_dict)
with tempfile.TemporaryDirectory() as tmp_dir:
reader = ReaderTest(tmp_dir, info)
instructions = "train[:33%]"
dset = Dataset(**reader.read(name, instructions, split_infos))
self.assertEqual(dset["filename"][0], f"{name}-train-00000-of-00010")
self.assertEqual(dset["filename"][-1], f"{name}-train-00003-of-00010")
self.assertEqual(dset.num_rows, 330)
self.assertEqual(dset.num_columns, 1)
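# (Worked arithmetic for the sharded read above: 33% of 1,000 examples is 330 rows; with ten
#  100-row shards this spans shards 00000 through 00003, which is what the filename checks assert.)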
def test_read_files(self):
train_info = SplitInfo(name="train", num_examples=100)
test_info = SplitInfo(name="test", num_examples=100)
split_dict = SplitDict()
split_dict.add(train_info)
split_dict.add(test_info)
info = DatasetInfo(splits=split_dict)
with tempfile.TemporaryDirectory() as tmp_dir:
reader = ReaderTest(tmp_dir, info)
files = [
{"filename": os.path.join(tmp_dir, "train")},
{"filename": os.path.join(tmp_dir, "test"), "skip": 10, "take": 10},
]
dset = Dataset(**reader.read_files(files, original_instructions="train+test[10:20]"))
self.assertEqual(dset.num_rows, 110)
self.assertEqual(dset.num_columns, 1)
del dset
@pytest.mark.parametrize("in_memory", [False, True])
def test_read_table(in_memory, dataset, arrow_file):
filename = arrow_file
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
table = ArrowReader.read_table(filename, in_memory=in_memory)
assert table.shape == dataset.data.shape
assert set(table.column_names) == set(dataset.data.column_names)
assert dict(table.to_pydict()) == dict(dataset.data.to_pydict()) # to_pydict returns OrderedDict
@pytest.mark.parametrize("in_memory", [False, True])
def test_read_files(in_memory, dataset, arrow_file):
filename = arrow_file
reader = ArrowReader("", None)
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
dataset_kwargs = reader.read_files([{"filename": filename}], in_memory=in_memory)
assert dataset_kwargs.keys() == {"arrow_table", "info", "split"}
table = dataset_kwargs["arrow_table"]
assert table.shape == dataset.data.shape
assert set(table.column_names) == set(dataset.data.column_names)
assert dict(table.to_pydict()) == dict(dataset.data.to_pydict()) # to_pydict returns OrderedDict
def test_read_instruction_spec():
assert ReadInstruction("train", to=10, unit="abs").to_spec() == "train[:10]"
assert ReadInstruction("train", from_=-80, to=10, unit="%").to_spec() == "train[-80%:10%]"
spec_train_test = "train+test"
assert ReadInstruction.from_spec(spec_train_test).to_spec() == spec_train_test
spec_train_abs = "train[2:10]"
assert ReadInstruction.from_spec(spec_train_abs).to_spec() == spec_train_abs
spec_train_pct = "train[15%:-20%]"
assert ReadInstruction.from_spec(spec_train_pct).to_spec() == spec_train_pct
spec_train_pct_rounding = "train[:10%](closest)"
assert ReadInstruction.from_spec(spec_train_pct_rounding).to_spec() == "train[:10%]"
spec_train_pct_rounding = "train[:10%](pct1_dropremainder)"
assert ReadInstruction.from_spec(spec_train_pct_rounding).to_spec() == spec_train_pct_rounding
spec_train_test_pct_rounding = "train[:10%](pct1_dropremainder)+test[-10%:](pct1_dropremainder)"
assert ReadInstruction.from_spec(spec_train_test_pct_rounding).to_spec() == spec_train_test_pct_rounding
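# For context (not exercised by this test): the same spec strings can be passed as the `split`
# argument of `load_dataset`, e.g. `load_dataset("some_dataset", split="train[:10%]+test[-20%:]")`,
# where "some_dataset" is only a placeholder name.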
def test_make_file_instructions():
name = "dummy"
split_infos = [SplitInfo(name="train", num_examples=100)]
instruction = "train[:33%]"
filetype_suffix = "arrow"
prefix_path = "prefix"
file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
assert isinstance(file_instructions, FileInstructions)
assert file_instructions.num_examples == 33
assert file_instructions.file_instructions == [
{"filename": os.path.join(prefix_path, f"{name}-train.arrow"), "skip": 0, "take": 33}
]
split_infos = [SplitInfo(name="train", num_examples=100, shard_lengths=[10] * 10)]
file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
assert isinstance(file_instructions, FileInstructions)
assert file_instructions.num_examples == 33
assert file_instructions.file_instructions == [
{"filename": os.path.join(prefix_path, f"{name}-train-00000-of-00010.arrow"), "skip": 0, "take": -1},
{"filename": os.path.join(prefix_path, f"{name}-train-00001-of-00010.arrow"), "skip": 0, "take": -1},
{"filename": os.path.join(prefix_path, f"{name}-train-00002-of-00010.arrow"), "skip": 0, "take": -1},
{"filename": os.path.join(prefix_path, f"{name}-train-00003-of-00010.arrow"), "skip": 0, "take": 3},
]
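# (Worked arithmetic for the sharded case above: 33% of 100 examples is 33 rows; with ten 10-row
#  shards that means shards 00000-00002 are taken fully (take=-1) plus 3 rows from shard 00003.)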
@pytest.mark.parametrize("name, expected_exception", [(None, TypeError), ("", ValueError)])
def test_make_file_instructions_raises(name, expected_exception):
split_infos = [SplitInfo(name="train", num_examples=100)]
instruction = "train"
filetype_suffix = "arrow"
prefix_path = "prefix_path"
with pytest.raises(expected_exception):
_ = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_hub.py | from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/_test_patching.py | # isort: skip_file
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: F401 - this is just for tests
import os as renamed_os # noqa: F401 - this is just for tests
from os import path # noqa: F401 - this is just for tests
from os import path as renamed_path # noqa: F401 - this is just for tests
from os.path import join # noqa: F401 - this is just for tests
from os.path import join as renamed_join # noqa: F401 - this is just for tests
open = open # noqa we just need to have a builtin inside this module to test it properly
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_search.py | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
def _create_dummy_dataset(self):
dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
return dset
def test_add_faiss_index(self):
import faiss
dset: Dataset = self._create_dummy_dataset()
dset = dset.map(
lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
)
dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
self.assertEqual(examples["filename"][0], "my_name-train_29")
dset.drop_index("vecs")
def test_add_faiss_index_from_external_arrays(self):
import faiss
dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
index_name="vecs",
batch_size=100,
metric_type=faiss.METRIC_INNER_PRODUCT,
)
scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
self.assertEqual(examples["filename"][0], "my_name-train_29")
def test_serialization(self):
import faiss
dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
index_name="vecs",
metric_type=faiss.METRIC_INNER_PRODUCT,
)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
dset.save_faiss_index("vecs", tmp_file.name)
dset.load_faiss_index("vecs2", tmp_file.name)
os.unlink(tmp_file.name)
scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
self.assertEqual(examples["filename"][0], "my_name-train_29")
def test_drop_index(self):
dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
)
dset.drop_index("vecs")
self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
def test_add_elasticsearch_index(self):
from elasticsearch import Elasticsearch
dset: Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
"elasticsearch.client.IndicesClient.create"
) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
mocked_index_create.return_value = {"acknowledged": True}
mocked_bulk.return_value = [(True, None)] * 30
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
es_client = Elasticsearch()
dset.add_elasticsearch_index("filename", es_client=es_client)
scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
def test_flat_ip(self):
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal, 5)
index.add_vectors(np.zeros((5, 5), dtype=np.float32))
self.assertEqual(index.faiss_index.ntotal, 10)
# single query
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
self.assertGreater(scores[0], 0)
self.assertEqual(indices[0], 1)
# batched queries
queries = np.eye(5, dtype=np.float32)[::-1]
total_scores, total_indices = index.search_batch(queries)
self.assertRaises(ValueError, index.search_batch, queries[0])
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores), 0)
self.assertListEqual([4, 3, 2, 1, 0], best_indices)
def test_factory(self):
import faiss
index = FaissIndex(string_factory="Flat")
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
index = FaissIndex(string_factory="LSH")
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
with self.assertRaises(ValueError):
_ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
def test_custom(self):
import faiss
custom_index = faiss.IndexFlat(5)
index = FaissIndex(custom_index=custom_index)
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
def test_serialization(self):
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
index.save(tmp_file.name)
index = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
self.assertGreater(scores[0], 0)
self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
index_name = "index.faiss"
path = f"mock://{index_name}"
index.save(path, storage_options=mockfs.storage_options)
index = FaissIndex.load(path, storage_options=mockfs.storage_options)
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def test_elasticsearch(self):
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
"elasticsearch.client.IndicesClient.create"
) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
es_client = Elasticsearch()
mocked_index_create.return_value = {"acknowledged": True}
index = ElasticSearchIndex(es_client=es_client)
mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"])
# single query
query = "foo"
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
scores, indices = index.search(query)
self.assertEqual(scores[0], 1)
self.assertEqual(indices[0], 0)
# single query with timeout
query = "foo"
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
scores, indices = index.search(query, request_timeout=30)
self.assertEqual(scores[0], 1)
self.assertEqual(indices[0], 0)
# batched queries
queries = ["foo", "bar", "foobar"]
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
total_scores, total_indices = index.search_batch(queries)
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores), 0)
self.assertListEqual([1, 1, 1], best_indices)
# batched queries with timeout
queries = ["foo", "bar", "foobar"]
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
total_scores, total_indices = index.search_batch(queries, request_timeout=30)
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores), 0)
self.assertListEqual([1, 1, 1], best_indices)
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_logging.py | from unittest.mock import patch
import datasets
from datasets import Dataset
def test_enable_disable_progress_bar():
dset = Dataset.from_dict({"col_1": [3, 2, 0, 1]})
with patch("tqdm.auto.tqdm") as mock_tqdm:
datasets.disable_progress_bar()
dset.map(lambda x: {"col_2": x["col_1"] + 1})
mock_tqdm.assert_not_called()
mock_tqdm.reset_mock()
datasets.enable_progress_bar()
dset.map(lambda x: {"col_2": x["col_1"] + 1})
mock_tqdm.assert_called()
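# The same switches apply outside the tests as well; a hedged sketch (`tokenize_fn` is a placeholder):
#   datasets.disable_progress_bar()
#   dset = dset.map(tokenize_fn)  # runs without tqdm output
#   datasets.enable_progress_bar()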
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_readme_util.py | import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"""\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
README_CORRECT_FOUR_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_EMPTY_YAML = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_EMPTY_YAML = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_NO_YAML = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"readme_md, expected_dict",
[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
],
)
def test_readme_from_string_correct(readme_md, expected_dict):
assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error",
[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
readme = ReadMe.from_string(readme_md, example_yaml_structure)
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error",
[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md,",
[
(README_MULTIPLE_SAME_HEADING_1),
],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict",
[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir) / "README.md"
with open(path, "w+") as readme_file:
readme_file.write(readme_md)
out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error",
[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
],
)
def test_readme_from_readme_error(readme_md, expected_error):
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir) / "README.md"
with open(path, "w+") as readme_file:
readme_file.write(readme_md)
expected_error = expected_error.format(path=path)
with pytest.raises(ValueError, match=re.escape(expected_error)):
readme = ReadMe.from_readme(path, example_yaml_structure)
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error",
[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir) / "README.md"
with open(path, "w+") as readme_file:
readme_file.write(readme_md)
expected_error = expected_error.format(path=path)
with pytest.raises(ValueError, match=re.escape(expected_error)):
ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md,",
[
(README_MULTIPLE_SAME_HEADING_1),
],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir) / "README.md"
with open(path, "w+") as readme_file:
readme_file.write(readme_md)
ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_dataset_dict.py | import os
import tempfile
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
from datasets import load_from_disk
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.features import ClassLabel, Features, Sequence, Value
from datasets.iterable_dataset import IterableDataset
from datasets.splits import NamedSplit
from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_tf, require_torch
class DatasetDictTest(TestCase):
def _create_dummy_dataset(self, multiple_columns=False):
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
dset = Dataset.from_dict(data)
else:
dset = Dataset.from_dict(
{"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]}
)
return dset
def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict:
return DatasetDict(
{
"train": self._create_dummy_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_dataset(multiple_columns=multiple_columns),
}
)
def _create_dummy_iterable_dataset(self, multiple_columns=False) -> IterableDataset:
def gen():
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
for v1, v2 in zip(data["col_1"], data["col_2"]):
yield {"col_1": v1, "col_2": v2}
else:
for x in range(30):
yield {"filename": "my_name-train" + "_" + f"{x:03d}"}
return IterableDataset.from_generator(gen)
def _create_dummy_iterable_dataset_dict(self, multiple_columns=False) -> IterableDatasetDict:
return IterableDatasetDict(
{
"train": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
}
)
def test_flatten(self):
dset_split = Dataset.from_dict(
{"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10},
features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}),
)
dset = DatasetDict({"train": dset_split, "test": dset_split})
dset = dset.flatten()
self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]})
self.assertListEqual(sorted(dset["train"].features.keys()), ["a.b.c", "foo"])
self.assertDictEqual(
dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")})
)
del dset
def test_set_format_numpy(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="numpy", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.reset_format()
with dset.formatted_as(type="numpy", columns=["col_1"]):
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], None)
self.assertEqual(dset_split.format["format_kwargs"], {})
self.assertEqual(dset_split.format["columns"], dset_split.column_names)
self.assertEqual(dset_split.format["output_all_columns"], False)
dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="numpy", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], np.str_)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
@require_torch
def test_set_format_torch(self):
import torch
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="torch", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="torch", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="torch")
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
del dset
@require_tf
def test_set_format_tf(self):
import tensorflow as tf
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="tensorflow", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3)
dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="tensorflow", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a")
del dset
def test_set_format_pandas(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="pandas", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 1)
self.assertIsInstance(dset_split[0], pd.DataFrame)
self.assertListEqual(list(dset_split[0].shape), [1, 1])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="pandas", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 2)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
def test_set_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_transform(transform=transform, columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], "custom")
self.assertEqual(len(dset_split[0].keys()), 1)
self.assertEqual(dset_split[0]["col_1"], "3")
self.assertEqual(dset_split[:2]["col_1"], ["3", "2"])
self.assertEqual(dset_split["col_1"][:2], ["3", "2"])
prev_format = dset[list(dset.keys())[0]].format
for dset_split in dset.values():
dset_split.set_format(**dset_split.format)
self.assertEqual(prev_format, dset_split.format)
dset.set_transform(transform=transform, columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].keys()), 2)
self.assertEqual(dset_split[0]["col_2"], "A")
del dset
def test_with_format(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_format("numpy", columns=["col_1"])
dset.set_format("numpy", columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_with_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_transform(transform, columns=["col_1"])
dset.set_transform(transform, columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_cast(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
features = dset["train"].features
features["col_1"] = Value("float64")
dset = dset.cast(features)
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertEqual(dset_split.features["col_1"], Value("float64"))
self.assertIsInstance(dset_split[0]["col_1"], float)
del dset
def test_remove_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.remove_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertListEqual(dset_split._format_columns, ["col_2"])
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
del dset
def test_rename_column(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"])
del dset
def test_select_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=[])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.select_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
self.assertListEqual(dset_split._format_columns, ["col_1"])
def test_map(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys()))
self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"])
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
mapped_dsets_2: DatasetDict = mapped_dsets_1.map(
lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys()))
self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"]))
del dsets, mapped_dsets_1, mapped_dsets_2
def test_iterable_map(self):
dsets = self._create_dummy_iterable_dataset_dict()
fn_kwargs = {"n": 3}
mapped_dsets: IterableDatasetDict = dsets.map(
lambda x, n: {"foo": [n] * len(x["filename"])},
batched=True,
fn_kwargs=fn_kwargs,
)
mapped_example = next(iter(mapped_dsets["train"]))
self.assertListEqual(sorted(mapped_example.keys()), sorted(["filename", "foo"]))
self.assertLessEqual(mapped_example["foo"], 3)
del dsets, mapped_dsets
def test_filter(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys()))
self.assertEqual(len(filtered_dsets_1["train"]), 10)
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
filtered_dsets_2: DatasetDict = filtered_dsets_1.filter(
lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys()))
self.assertEqual(len(filtered_dsets_2["train"]), 5)
filtered_dsets_3: DatasetDict = dsets.filter(
lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys()))
self.assertEqual(len(filtered_dsets_3["train"]), 10)
del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3
def test_iterable_filter(self):
dsets = self._create_dummy_iterable_dataset_dict()
example = next(iter(dsets["train"]))
fn_kwargs = {"n": 3}
filtered_dsets: IterableDatasetDict = dsets.filter(
lambda ex, n: n < int(ex["filename"].split("_")[-1]), fn_kwargs=fn_kwargs
)
filtered_example = next(iter(filtered_dsets["train"]))
self.assertListEqual(list(example.keys()), list(filtered_example.keys()))
self.assertEqual(int(filtered_example["filename"].split("_")[-1]), 4)  # only ids greater than n=3 are kept, so the first one is 4
del dsets, filtered_dsets
def test_sort(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
sorted_dsets_1: DatasetDict = dsets.sort("filename")
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]],
sorted(f"{x:03d}" for x in range(30)),
)
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
sorted_dsets_2: DatasetDict = sorted_dsets_1.sort(
"filename", indices_cache_file_names=indices_cache_file_names, reverse=True
)
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]],
sorted((f"{x:03d}" for x in range(30)), reverse=True),
)
del dsets, sorted_dsets_1, sorted_dsets_2
def test_shuffle(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
seeds = {
"train": 1234,
"test": 1234,
}
dsets_shuffled = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"])
self.assertEqual(len(dsets_shuffled["train"]), 30)
self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028")
self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010")
self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")}))
self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")}))
# Reproducibility
indices_cache_file_names_2 = {
"train": os.path.join(tmp_dir, "train_2.arrow"),
"test": os.path.join(tmp_dir, "test_2.arrow"),
}
dsets_shuffled_2 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"])
seeds = {
"train": 1234,
"test": 1,
}
indices_cache_file_names_3 = {
"train": os.path.join(tmp_dir, "train_3.arrow"),
"test": os.path.join(tmp_dir, "test_3.arrow"),
}
dsets_shuffled_3 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False
)
self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"])
# other input types
dsets_shuffled_int = dsets.shuffle(42)
dsets_shuffled_alias = dsets.shuffle(seed=42)
dsets_shuffled_none = dsets.shuffle()
self.assertEqual(len(dsets_shuffled_int["train"]), 30)
self.assertEqual(len(dsets_shuffled_alias["train"]), 30)
self.assertEqual(len(dsets_shuffled_none["train"]), 30)
del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3
del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none
def test_flatten_indices(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
dsets_shuffled = dsets.shuffle(
seed=42, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertIsNotNone(dsets_shuffled["train"]._indices)
self.assertIsNotNone(dsets_shuffled["test"]._indices)
dsets_flat = dsets_shuffled.flatten_indices()
self.assertIsNone(dsets_flat["train"]._indices)
self.assertIsNone(dsets_flat["test"]._indices)
del dsets, dsets_shuffled, dsets_flat
def test_check_values_type(self):
dsets = self._create_dummy_dataset_dict()
dsets["bad_split"] = None
self.assertRaises(TypeError, dsets.map, lambda x: x)
self.assertRaises(TypeError, dsets.filter, lambda x: True)
self.assertRaises(TypeError, dsets.shuffle)
self.assertRaises(TypeError, dsets.sort, "filename")
del dsets
def test_serialization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
del reloaded_dsets
del dsets["test"]
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
del dsets, reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_shards={"train": 3, "test": 2})
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 3)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_proc=2)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 2)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
def test_load_from_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
del dsets
dsets = load_from_disk(tmp_dir)
self.assertListEqual(sorted(dsets), ["test", "train"])
self.assertEqual(len(dsets["train"]), 30)
self.assertListEqual(dsets["train"].column_names, ["filename"])
self.assertEqual(len(dsets["test"]), 30)
self.assertListEqual(dsets["test"].column_names, ["filename"])
del dsets
def test_align_labels_with_mapping(self):
train_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]),
}
)
test_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]),
}
)
train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]}
test_data = {"input_text": ["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]}
label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1}
id2label = {v: k for k, v in label2id.items()}
train_expected_labels = [2, 2, 1, 1, 0, 0]
test_expected_labels = [2, 2, 0, 0, 1, 1]
train_expected_label_names = [id2label[idx] for idx in train_expected_labels]
test_expected_label_names = [id2label[idx] for idx in test_expected_labels]
dsets = DatasetDict(
{
"train": Dataset.from_dict(train_data, features=train_features),
"test": Dataset.from_dict(test_data, features=test_features),
}
)
dsets = dsets.align_labels_with_mapping(label2id, "input_labels")
self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"])
self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"])
train_aligned_label_names = [
dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"]
]
test_aligned_label_names = [
dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"]
]
self.assertListEqual(train_expected_label_names, train_aligned_label_names)
self.assertListEqual(test_expected_label_names, test_aligned_label_names)
def test_dummy_datasetdict_serialize_fs(mockfs):
dataset_dict = DatasetDict(
{
"train": Dataset.from_dict({"a": range(30)}),
"test": Dataset.from_dict({"a": range(10)}),
}
)
dataset_path = "mock://my_dataset"
dataset_dict.save_to_disk(dataset_path, storage_options=mockfs.storage_options)
assert mockfs.isdir(dataset_path)
assert mockfs.glob(dataset_path + "/*")
reloaded = dataset_dict.load_from_disk(dataset_path, storage_options=mockfs.storage_options)
assert list(reloaded) == list(dataset_dict)
for k in dataset_dict:
assert reloaded[k].features == dataset_dict[k].features
assert reloaded[k].to_dict() == dataset_dict[k].to_dict()
def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_csv_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_csv_split(split, csv_path, tmp_path):
if split:
path = {split: csv_path}
else:
split = "train"
path = {"train": csv_path, "test": csv_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_csv(path, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
if split:
path = {split: jsonl_path}
else:
split = "train"
path = {"train": jsonl_path, "test": jsonl_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_json(path, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_parquet({"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path):
if split:
path = {split: parquet_path}
else:
split = "train"
path = {"train": parquet_path, "test": parquet_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
if split:
path = {split: text_path}
else:
split = "train"
path = {"train": text_path, "test": text_path}
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = DatasetDict.from_text(path, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_hf_gcp.py | import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = "/".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
config.DATASET_INFO_FILENAME,
]
)
dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name="20220301.frr",
hash=dataset_module.hash,
)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_path,
config_name="20220301.frr",
hash=dataset_module.hash,
)
ds = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(ds, IterableDatasetDict)
assert "train" in ds
assert isinstance(ds["train"], IterableDataset)
assert next(iter(ds["train"]))
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/utils.py | import asyncio
import importlib.metadata
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def require_faiss(test_case):
"""
Decorator marking a test that requires Faiss.
These tests are skipped when Faiss isn't installed.
"""
try:
import faiss # noqa
except ImportError:
test_case = unittest.skip("test requires faiss")(test_case)
return test_case
def require_regex(test_case):
"""
Decorator marking a test that requires regex.
These tests are skipped when Regex isn't installed.
"""
try:
import regex # noqa
except ImportError:
test_case = unittest.skip("test requires regex")(test_case)
return test_case
def require_elasticsearch(test_case):
"""
Decorator marking a test that requires ElasticSearch.
These tests are skipped when ElasticSearch isn't installed.
"""
try:
import elasticsearch # noqa
except ImportError:
test_case = unittest.skip("test requires elasticsearch")(test_case)
return test_case
def require_sqlalchemy(test_case):
"""
Decorator marking a test that requires SQLAlchemy.
These tests are skipped when SQLAlchemy isn't installed.
"""
try:
import sqlalchemy # noqa
except ImportError:
test_case = unittest.skip("test requires sqlalchemy")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not config.TORCH_AVAILABLE:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not config.TF_AVAILABLE:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_jax(test_case):
"""
Decorator marking a test that requires JAX.
These tests are skipped when JAX isn't installed.
"""
if not config.JAX_AVAILABLE:
test_case = unittest.skip("test requires JAX")(test_case)
return test_case
def require_pil(test_case):
"""
Decorator marking a test that requires Pillow.
These tests are skipped when Pillow isn't installed.
"""
if not config.PIL_AVAILABLE:
test_case = unittest.skip("test requires Pillow")(test_case)
return test_case
def require_transformers(test_case):
"""
Decorator marking a test that requires transformers.
These tests are skipped when transformers isn't installed.
"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(test_case)
else:
return test_case
def require_tiktoken(test_case):
"""
Decorator marking a test that requires tiktoken.
These tests are skipped when tiktoken isn't installed.
"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken")(test_case)
else:
return test_case
def require_spacy(test_case):
"""
Decorator marking a test that requires spacy.
These tests are skipped when spacy isn't installed.
"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy")(test_case)
else:
return test_case
def require_spacy_model(model):
"""
Decorator marking a test that requires a spacy model.
These tests are skipped when spacy or the requested model isn't installed.
"""
def _require_spacy_model(test_case):
try:
import spacy # noqa F401
spacy.load(model)
except ImportError:
return unittest.skip("test requires spacy")(test_case)
except OSError:
return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
else:
return test_case
return _require_spacy_model
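# Hedged usage sketch (editorial addition, not part of the original suite): unlike the
# plain `require_*` markers, `require_spacy_model` is a decorator *factory*, so it is
# called with a model name first. "en_core_web_sm" is an arbitrary example model; the
# wrapper function keeps the model-loading attempt out of module import time.
def _example_require_spacy_model_usage():
    @require_spacy_model("en_core_web_sm")
    def _spacy_model_check():
        import spacy
        assert spacy.load("en_core_web_sm") is not None
    return _spacy_model_check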
def require_pyspark(test_case):
"""
Decorator marking a test that requires pyspark.
These tests are skipped when pyspark isn't installed.
"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark")(test_case)
else:
return test_case
def require_joblibspark(test_case):
"""
Decorator marking a test that requires joblibspark.
These tests are skipped when joblibspark isn't installed.
"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark")(test_case)
else:
return test_case
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests or _run_slow_tests == 0:
test_case = unittest.skip("test is slow")(test_case)
return test_case
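# Hedged usage sketch (editorial addition): the markers defined in this module are plain
# decorators and can be stacked. The hypothetical check below only runs when RUN_SLOW is
# set to a truthy value and PyTorch is installed; otherwise it is wrapped with `unittest.skip`.
@slow
@require_torch
def _example_slow_torch_check():
    import torch
    assert torch.tensor([1, 2, 3]).sum().item() == 6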
def local(test_case):
"""
Decorator marking a test as local
Local tests are run by default. Set the RUN_LOCAL environment variable
to a falsy value to not run them.
"""
if not _run_local_tests or _run_local_tests == 0:
test_case = unittest.skip("test is local")(test_case)
return test_case
def packaged(test_case):
"""
Decorator marking a test as packaged
Packaged tests are run by default. Set the RUN_PACKAGED environment variable
to a falsy value to not run them.
"""
if not _run_packaged_tests or _run_packaged_tests == 0:
test_case = unittest.skip("test is packaged")(test_case)
return test_case
def remote(test_case):
"""
Decorator marking a test as one that relies on GitHub or the Hugging Face Hub.
Remote tests are skipped by default. Set the RUN_REMOTE environment variable
to a truthy value to run them.
"""
if not _run_remote_tests or _run_remote_tests == 0:
test_case = unittest.skip("test requires remote")(test_case)
return test_case
def for_all_test_methods(*decorators):
def decorate(cls):
for name, fn in cls.__dict__.items():
if callable(fn) and name.startswith("test"):
for decorator in decorators:
fn = decorator(fn)
setattr(cls, name, fn)
return cls
return decorate
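# Hedged usage sketch (editorial addition): `for_all_test_methods` is a class decorator
# that applies each given decorator to every `test*` method. The TestCase below is a
# hypothetical illustration; building it inside a function avoids defining an extra
# class at import time.
def _example_for_all_test_methods_usage():
    @for_all_test_methods(require_torch, slow)
    class _ExampleAllSlowTorchTests(unittest.TestCase):
        def test_noop(self):
            self.assertTrue(True)
    return _ExampleAllSlowTorchTests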
class RequestWouldHangIndefinitelyError(Exception):
pass
class OfflineSimulationMode(Enum):
CONNECTION_FAILS = 0
CONNECTION_TIMES_OUT = 1
HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
"""
Simulate offline mode.
There are three offline simulation modes:
CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call.
Connection errors are created by mocking requests.Session.send
CONNECTION_TIMES_OUT: the connection hangs until it times out.
The default timeout value is low (1e-16) to speed up the tests.
Timeout errors are created by mocking requests.Session.request
HF_DATASETS_OFFLINE_SET_TO_1: the HF_DATASETS_OFFLINE environment variable is set to 1.
This makes the http/ftp calls of the library instantly fail and raise an OfflineModeIsEnabled error.
"""
online_request = requests.Session().request
def timeout_request(session, method, url, **kwargs):
# Change the url to an invalid url so that the connection hangs
invalid_url = "https://10.255.255.1"
if kwargs.get("timeout") is None:
raise RequestWouldHangIndefinitelyError(
f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
)
kwargs["timeout"] = timeout
try:
return online_request(method, invalid_url, **kwargs)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
e.request.url = url
max_retry_error = e.args[0]
max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
e.args = (max_retry_error,)
raise
def raise_connection_error(session, prepared_request, **kwargs):
raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send", raise_connection_error):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request", timeout_request):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE", True):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
original_working_dir = str(Path().resolve())
with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
try:
os.chdir(tmp_dir)
yield
finally:
os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
import gc
gc.collect()
previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
import gc
gc.collect()
previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
import decorator
from requests.exceptions import HTTPError
def _wrapper(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if str(err).startswith("500") or str(err).startswith("502"):
pytest.xfail(str(err))
raise err
return decorator.decorator(_wrapper, func)
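# Hedged usage sketch (editorial addition): `xfail_if_500_502_http_error` turns transient
# 500/502 responses into xfails instead of hard failures. The endpoint below is just an
# example URL and the body is illustrative; wrapping it inside a function keeps the
# optional `decorator` dependency from being imported at module load.
def _example_xfail_on_server_errors():
    @xfail_if_500_502_http_error
    def _hub_smoke_check():
        response = requests.get("https://huggingface.co/api/datasets", timeout=10)
        response.raise_for_status()
    return _hub_smoke_check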
# --- distributed testing functions --- #
# copied from transformers
# originally adapted from https://stackoverflow.com/a/59041913/9201239
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns the numerical id of the current worker under `pytest-xdist`'s concurrent workers (`pytest -n N`) regime, or 0
if `-n 1` is used or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torchrun`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
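# Hedged usage sketch (editorial addition): how the distributed-testing helpers above are
# typically combined. "path/to/ddp_script.py" and the 2-process setting are placeholders,
# not values prescribed by this module.
def _example_run_torch_distributed(script_path="path/to/ddp_script.py", nproc_per_node=2):
    cmd = [
        "torchrun",
        f"--nproc_per_node={nproc_per_node}",
        f"--master_port={get_torch_dist_unique_port()}",
        script_path,
    ]
    # raises RuntimeError if the command fails or produces no output at all
    return execute_subprocess_async(cmd, env=os.environ.copy())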
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_offline_util.py | import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.ConnectTimeout):
requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS):
with pytest.raises(requests.exceptions.ConnectionError):
requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
with pytest.raises(ConnectionError):
http_head("https://huggingface.co")
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_sharding_utils.py | import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected",
[
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
],
)
def test_distribute_shards(kwargs, expected):
out = _distribute_shards(**kwargs)
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected",
[
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected",
[
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
if expected is RuntimeError:
with pytest.raises(expected):
_number_of_shards_in_gen_kwargs(gen_kwargs)
else:
out = _number_of_shards_in_gen_kwargs(gen_kwargs)
assert out == expected
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_streaming_download_manager.py | import json
import os
import re
from pathlib import Path
import pytest
from fsspec.registry import _registry as _fsspec_registry
from fsspec.spec import AbstractBufferedFile, AbstractFileSystem
from datasets.download.download_config import DownloadConfig
from datasets.download.streaming_download_manager import (
StreamingDownloadManager,
_get_extraction_protocol,
xbasename,
xexists,
xgetsize,
xglob,
xisdir,
xisfile,
xjoin,
xlistdir,
xnumpy_load,
xopen,
xPath,
xrelpath,
xsplit,
xsplitext,
xwalk,
)
from datasets.filesystems import COMPRESSION_FILESYSTEMS
from datasets.utils.hub import hf_hub_url
from .utils import require_lz4, require_zstandard, slow
TEST_URL = "https://huggingface.co/datasets/lhoestq/test/raw/main/some_text.txt"
TEST_URL_CONTENT = "foo\nbar\nfoobar"
TEST_GG_DRIVE_FILENAME = "train.tsv"
TEST_GG_DRIVE_URL = "https://drive.google.com/uc?export=download&id=17bOgBDc3hRCoPZ89EYtKDzK-yXAWat94"
TEST_GG_DRIVE_GZIPPED_URL = "https://drive.google.com/uc?export=download&id=1Bt4Garpf0QLiwkJhHJzXaVa0I0H5Qhwz"
TEST_GG_DRIVE_ZIPPED_URL = "https://drive.google.com/uc?export=download&id=1k92sUfpHxKq8PXWRr7Y5aNHXwOCNUmqh"
TEST_GG_DRIVE_CONTENT = """\
pokemon_name, type
Charmander, fire
Squirtle, water
Bulbasaur, grass"""
class DummyTestFS(AbstractFileSystem):
protocol = "mock"
_file_class = AbstractBufferedFile
_fs_contents = (
{"name": "top_level", "type": "directory"},
{"name": "top_level/second_level", "type": "directory"},
{"name": "top_level/second_level/date=2019-10-01", "type": "directory"},
{
"name": "top_level/second_level/date=2019-10-01/a.parquet",
"type": "file",
"size": 100,
},
{
"name": "top_level/second_level/date=2019-10-01/b.parquet",
"type": "file",
"size": 100,
},
{"name": "top_level/second_level/date=2019-10-02", "type": "directory"},
{
"name": "top_level/second_level/date=2019-10-02/a.parquet",
"type": "file",
"size": 100,
},
{"name": "top_level/second_level/date=2019-10-04", "type": "directory"},
{
"name": "top_level/second_level/date=2019-10-04/a.parquet",
"type": "file",
"size": 100,
},
{"name": "misc", "type": "directory"},
{"name": "misc/foo.txt", "type": "file", "size": 100},
{"name": "glob_test", "type": "directory", "size": 0},
{"name": "glob_test/hat", "type": "directory", "size": 0},
{"name": "glob_test/hat/^foo.txt", "type": "file", "size": 100},
{"name": "glob_test/dollar", "type": "directory", "size": 0},
{"name": "glob_test/dollar/$foo.txt", "type": "file", "size": 100},
{"name": "glob_test/lbrace", "type": "directory", "size": 0},
{"name": "glob_test/lbrace/{foo.txt", "type": "file", "size": 100},
{"name": "glob_test/rbrace", "type": "directory", "size": 0},
{"name": "glob_test/rbrace/}foo.txt", "type": "file", "size": 100},
)
def __getitem__(self, name):
for item in self._fs_contents:
if item["name"] == name:
return item
raise IndexError(f"{name} not found!")
def ls(self, path, detail=True, refresh=True, **kwargs):
if kwargs.pop("strip_proto", True):
path = self._strip_protocol(path)
files = not refresh and self._ls_from_cache(path)
if not files:
files = [file for file in self._fs_contents if path == self._parent(file["name"])]
files.sort(key=lambda file: file["name"])
self.dircache[path.rstrip("/")] = files
if detail:
return files
return [file["name"] for file in files]
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
return self._file_class(
self,
path,
mode,
block_size,
autocommit,
cache_options=cache_options,
**kwargs,
)
@pytest.fixture
def mock_fsspec():
_fsspec_registry["mock"] = DummyTestFS
yield
del _fsspec_registry["mock"]
def _readd_double_slash_removed_by_path(path_as_posix: str) -> str:
"""Path(...) on an url path like zip://file.txt::http://host.com/data.zip
converts the :// to :/
This function re-adds the ://
It handles cases like:
- https://host.com/data.zip
- C://data.zip
- zip://file.txt::https://host.com/data.zip
- zip://file.txt::/Users/username/data.zip
- zip://file.txt::C://data.zip
Args:
path_as_posix (str): output of Path(...).as_posix()
Returns:
str: the url path with :// instead of :/
"""
return re.sub("([A-z]:/)([A-z:])", r"\g<1>/\g<2>", path_as_posix)
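# Hedged illustration (editorial addition) of the behaviour documented above: on a POSIX
# filesystem, `Path(...).as_posix()` collapses each "://" in a chained URL to ":/", and
# `_readd_double_slash_removed_by_path` restores the protocol separators.
def _example_readd_double_slash():
    squashed = Path("zip://file.txt::https://host.com/data.zip").as_posix()
    # squashed is e.g. "zip:/file.txt::https:/host.com/data.zip"
    return _readd_double_slash_removed_by_path(squashed)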
@pytest.mark.parametrize(
"input_path, paths_to_join, expected_path",
[
(
"https://host.com/archive.zip",
("file.txt",),
"https://host.com/archive.zip/file.txt",
),
(
"zip://::https://host.com/archive.zip",
("file.txt",),
"zip://file.txt::https://host.com/archive.zip",
),
(
"zip://folder::https://host.com/archive.zip",
("file.txt",),
"zip://folder/file.txt::https://host.com/archive.zip",
),
(
".",
("file.txt",),
os.path.join(".", "file.txt"),
),
(
str(Path().resolve()),
("file.txt",),
str((Path().resolve() / "file.txt")),
),
],
)
def test_xjoin(input_path, paths_to_join, expected_path):
output_path = xjoin(input_path, *paths_to_join)
assert output_path == expected_path
output_path = xPath(input_path).joinpath(*paths_to_join)
assert output_path == xPath(expected_path)
@pytest.mark.parametrize(
"input_path, expected_path",
[
(str(Path(__file__).resolve()), str(Path(__file__).resolve().parent)),
("https://host.com/archive.zip", "https://host.com"),
(
"zip://file.txt::https://host.com/archive.zip",
"zip://::https://host.com/archive.zip",
),
(
"zip://folder/file.txt::https://host.com/archive.zip",
"zip://folder::https://host.com/archive.zip",
),
],
)
def test_xdirname(input_path, expected_path):
from datasets.download.streaming_download_manager import xdirname
output_path = xdirname(input_path)
output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix())
assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
@pytest.mark.parametrize(
"input_path, exists",
[
("tmp_path/file.txt", True),
("tmp_path/file_that_doesnt_exist.txt", False),
("mock://top_level/second_level/date=2019-10-01/a.parquet", True),
("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False),
],
)
def test_xexists(input_path, exists, tmp_path, mock_fsspec):
if input_path.startswith("tmp_path"):
input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path))
(tmp_path / "file.txt").touch()
assert xexists(input_path) is exists
@pytest.mark.integration
def test_xexists_private(hf_private_dataset_repo_txt_data, hf_token):
root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "")
download_config = DownloadConfig(token=hf_token)
assert xexists(root_url + "data/text_data.txt", download_config=download_config)
assert not xexists(root_url + "file_that_doesnt_exist.txt", download_config=download_config)
@pytest.mark.parametrize(
"input_path, expected_head_and_tail",
[
(
str(Path(__file__).resolve()),
(str(Path(__file__).resolve().parent), str(Path(__file__).resolve().name)),
),
("https://host.com/archive.zip", ("https://host.com", "archive.zip")),
("zip://file.txt::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "file.txt")),
("zip://folder::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "folder")),
("zip://::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "")),
],
)
def test_xsplit(input_path, expected_head_and_tail):
output_path, tail = xsplit(input_path)
expected_path, expected_tail = expected_head_and_tail
output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix())
expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
assert output_path == expected_path
assert tail == expected_tail
@pytest.mark.parametrize(
"input_path, expected_path_and_ext",
[
(
str(Path(__file__).resolve()),
(str(Path(__file__).resolve().with_suffix("")), str(Path(__file__).resolve().suffix)),
),
("https://host.com/archive.zip", ("https://host.com/archive", ".zip")),
("zip://file.txt::https://host.com/archive.zip", ("zip://file::https://host.com/archive.zip", ".txt")),
("zip://folder::https://host.com/archive.zip", ("zip://folder::https://host.com/archive.zip", "")),
("zip://::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "")),
],
)
def test_xsplitext(input_path, expected_path_and_ext):
output_path, ext = xsplitext(input_path)
expected_path, expected_ext = expected_path_and_ext
output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix())
expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
assert output_path == expected_path
assert ext == expected_ext
def test_xopen_local(text_path):
with xopen(text_path, "r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file:
assert list(f) == list(expected_file)
with xPath(text_path).open("r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file:
assert list(f) == list(expected_file)
@pytest.mark.integration
def test_xopen_remote():
with xopen(TEST_URL, "r", encoding="utf-8") as f:
assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True)
with xPath(TEST_URL).open("r", encoding="utf-8") as f:
assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True)
@pytest.mark.parametrize(
"input_path, expected_paths",
[
("tmp_path", ["file1.txt", "file2.txt"]),
("mock://", ["glob_test", "misc", "top_level"]),
("mock://top_level", ["second_level"]),
("mock://top_level/second_level/date=2019-10-01", ["a.parquet", "b.parquet"]),
],
)
def test_xlistdir(input_path, expected_paths, tmp_path, mock_fsspec):
if input_path.startswith("tmp_path"):
input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path))
for file in ["file1.txt", "file2.txt"]:
(tmp_path / file).touch()
output_paths = sorted(xlistdir(input_path))
assert output_paths == expected_paths
@pytest.mark.integration
def test_xlistdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token):
root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip")
download_config = DownloadConfig(token=hf_token)
assert len(xlistdir("zip://::" + root_url, download_config=download_config)) == 1
assert len(xlistdir("zip://main_dir::" + root_url, download_config=download_config)) == 2
with pytest.raises(FileNotFoundError):
xlistdir("zip://qwertyuiop::" + root_url, download_config=download_config)
with pytest.raises(FileNotFoundError):
xlistdir(root_url, download_config=download_config)
@pytest.mark.parametrize(
"input_path, isdir",
[
("tmp_path", True),
("tmp_path/file.txt", False),
("mock://", True),
("mock://top_level", True),
("mock://dir_that_doesnt_exist", False),
],
)
def test_xisdir(input_path, isdir, tmp_path, mock_fsspec):
if input_path.startswith("tmp_path"):
input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path))
(tmp_path / "file.txt").touch()
assert xisdir(input_path) == isdir
@pytest.mark.integration
def test_xisdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token):
root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip")
download_config = DownloadConfig(token=hf_token)
assert xisdir("zip://::" + root_url, download_config=download_config) is True
assert xisdir("zip://main_dir::" + root_url, download_config=download_config) is True
assert xisdir("zip://qwertyuiop::" + root_url, download_config=download_config) is False
assert xisdir(root_url, download_config=download_config) is False
@pytest.mark.parametrize(
"input_path, isfile",
[
("tmp_path/file.txt", True),
("tmp_path/file_that_doesnt_exist.txt", False),
("mock://", False),
("mock://top_level/second_level/date=2019-10-01/a.parquet", True),
],
)
def test_xisfile(input_path, isfile, tmp_path, mock_fsspec):
if input_path.startswith("tmp_path"):
input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path))
(tmp_path / "file.txt").touch()
assert xisfile(input_path) == isfile
@pytest.mark.integration
def test_xisfile_private(hf_private_dataset_repo_txt_data, hf_token):
root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "")
download_config = DownloadConfig(token=hf_token)
assert xisfile(root_url + "data/text_data.txt", download_config=download_config) is True
assert xisfile(root_url + "qwertyuiop", download_config=download_config) is False
@pytest.mark.parametrize(
"input_path, size",
[
("tmp_path/file.txt", 100),
("mock://", 0),
("mock://top_level/second_level/date=2019-10-01/a.parquet", 100),
],
)
def test_xgetsize(input_path, size, tmp_path, mock_fsspec):
if input_path.startswith("tmp_path"):
input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path))
(tmp_path / "file.txt").touch()
(tmp_path / "file.txt").write_bytes(b"x" * 100)
assert xgetsize(input_path) == size
@pytest.mark.integration
def test_xgetsize_private(hf_private_dataset_repo_txt_data, hf_token):
root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "")
download_config = DownloadConfig(token=hf_token)
assert xgetsize(root_url + "data/text_data.txt", download_config=download_config) == 39
with pytest.raises(FileNotFoundError):
xgetsize(root_url + "qwertyuiop", download_config=download_config)
@pytest.mark.parametrize(
"input_path, expected_paths",
[
("tmp_path/*.txt", ["file1.txt", "file2.txt"]),
("mock://*", ["mock://glob_test", "mock://misc", "mock://top_level"]),
("mock://top_*", ["mock://top_level"]),
(
"mock://top_level/second_level/date=2019-10-0[1-4]",
[
"mock://top_level/second_level/date=2019-10-01",
"mock://top_level/second_level/date=2019-10-02",
"mock://top_level/second_level/date=2019-10-04",
],
),
(
"mock://top_level/second_level/date=2019-10-0[1-4]/*",
[
"mock://top_level/second_level/date=2019-10-01/a.parquet",
"mock://top_level/second_level/date=2019-10-01/b.parquet",
"mock://top_level/second_level/date=2019-10-02/a.parquet",
"mock://top_level/second_level/date=2019-10-04/a.parquet",
],
),
],
)
def test_xglob(input_path, expected_paths, tmp_path, mock_fsspec):
if input_path.startswith("tmp_path"):
input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path))
expected_paths = [str(tmp_path / file) for file in expected_paths]
for file in ["file1.txt", "file2.txt", "README.md"]:
(tmp_path / file).touch()
output_paths = sorted(xglob(input_path))
assert output_paths == expected_paths
@pytest.mark.integration
def test_xglob_private(hf_private_dataset_repo_zipped_txt_data, hf_token):
root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip")
download_config = DownloadConfig(token=hf_token)
assert len(xglob("zip://**::" + root_url, download_config=download_config)) == 3
assert len(xglob("zip://qwertyuiop/*::" + root_url, download_config=download_config)) == 0
@pytest.mark.parametrize(
"input_path, expected_outputs",
[
("tmp_path", [("", [], ["file1.txt", "file2.txt", "README.md"])]),
(
"mock://top_level/second_level",
[
("mock://top_level/second_level", ["date=2019-10-01", "date=2019-10-02", "date=2019-10-04"], []),
("mock://top_level/second_level/date=2019-10-01", [], ["a.parquet", "b.parquet"]),
("mock://top_level/second_level/date=2019-10-02", [], ["a.parquet"]),
("mock://top_level/second_level/date=2019-10-04", [], ["a.parquet"]),
],
),
],
)
def test_xwalk(input_path, expected_outputs, tmp_path, mock_fsspec):
if input_path.startswith("tmp_path"):
input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path))
expected_outputs = sorted(
[
(str(tmp_path / dirpath).rstrip("/"), sorted(dirnames), sorted(filenames))
for dirpath, dirnames, filenames in expected_outputs
]
)
for file in ["file1.txt", "file2.txt", "README.md"]:
(tmp_path / file).touch()
outputs = sorted(xwalk(input_path))
outputs = [(dirpath, sorted(dirnames), sorted(filenames)) for dirpath, dirnames, filenames in outputs]
assert outputs == expected_outputs
@pytest.mark.integration
def test_xwalk_private(hf_private_dataset_repo_zipped_txt_data, hf_token):
root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip")
download_config = DownloadConfig(token=hf_token)
assert len(list(xwalk("zip://::" + root_url, download_config=download_config))) == 2
assert len(list(xwalk("zip://main_dir::" + root_url, download_config=download_config))) == 1
assert len(list(xwalk("zip://qwertyuiop::" + root_url, download_config=download_config))) == 0
@pytest.mark.parametrize(
"input_path, start_path, expected_path",
[
("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1", "dir2/file.txt".replace("/", os.path.sep)),
("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1/dir2".replace("/", os.path.sep), "file.txt"),
("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", "file.txt"),
(
"zip://folder/file.txt::https://host.com/archive.zip",
"zip://::https://host.com/archive.zip",
"folder/file.txt",
),
(
"zip://folder/file.txt::https://host.com/archive.zip",
"zip://folder::https://host.com/archive.zip",
"file.txt",
),
],
)
def test_xrelpath(input_path, start_path, expected_path):
output_path = xrelpath(input_path, start=start_path)
assert output_path == expected_path
class TestxPath:
@pytest.mark.parametrize(
"input_path",
[
"https://host.com/archive.zip",
"zip://file.txt::https://host.com/archive.zip",
"zip://dir/file.txt::https://host.com/archive.zip",
"file.txt",
str(Path().resolve() / "file.txt"),
],
)
def test_xpath_str(self, input_path):
assert str(xPath(input_path)) == input_path
@pytest.mark.parametrize(
"input_path, expected_path",
[
("https://host.com/archive.zip", "https://host.com/archive.zip"),
("zip://file.txt::https://host.com/archive.zip", "zip://file.txt::https://host.com/archive.zip"),
("zip://dir/file.txt::https://host.com/archive.zip", "zip://dir/file.txt::https://host.com/archive.zip"),
("file.txt", "file.txt"),
(str(Path().resolve() / "file.txt"), (Path().resolve() / "file.txt").as_posix()),
],
)
def test_xpath_as_posix(self, input_path, expected_path):
assert xPath(input_path).as_posix() == expected_path
@pytest.mark.parametrize(
"input_path, exists",
[
("tmp_path/file.txt", True),
("tmp_path/file_that_doesnt_exist.txt", False),
("mock://top_level/second_level/date=2019-10-01/a.parquet", True),
("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False),
],
)
def test_xpath_exists(self, input_path, exists, tmp_path, mock_fsspec):
if input_path.startswith("tmp_path"):
input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path))
(tmp_path / "file.txt").touch()
assert xexists(input_path) is exists
@pytest.mark.parametrize(
"input_path, pattern, expected_paths",
[
("tmp_path", "*.txt", ["file1.txt", "file2.txt"]),
("mock://", "*", ["mock://glob_test", "mock://misc", "mock://top_level"]),
("mock://", "top_*", ["mock://top_level"]),
(
"mock://top_level/second_level",
"date=2019-10-0[1-4]",
[
"mock://top_level/second_level/date=2019-10-01",
"mock://top_level/second_level/date=2019-10-02",
"mock://top_level/second_level/date=2019-10-04",
],
),
(
"mock://top_level/second_level",
"date=2019-10-0[1-4]/*",
[
"mock://top_level/second_level/date=2019-10-01/a.parquet",
"mock://top_level/second_level/date=2019-10-01/b.parquet",
"mock://top_level/second_level/date=2019-10-02/a.parquet",
"mock://top_level/second_level/date=2019-10-04/a.parquet",
],
),
],
)
def test_xpath_glob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec):
if input_path == "tmp_path":
input_path = tmp_path
expected_paths = [tmp_path / file for file in expected_paths]
for file in ["file1.txt", "file2.txt", "README.md"]:
(tmp_path / file).touch()
else:
expected_paths = [Path(file) for file in expected_paths]
output_paths = sorted(xPath(input_path).glob(pattern))
assert output_paths == expected_paths
@pytest.mark.parametrize(
"input_path, pattern, expected_paths",
[
("tmp_path", "*.txt", ["file1.txt", "file2.txt"]),
(
"mock://",
"date=2019-10-0[1-4]",
[
"mock://top_level/second_level/date=2019-10-01",
"mock://top_level/second_level/date=2019-10-02",
"mock://top_level/second_level/date=2019-10-04",
],
),
(
"mock://top_level",
"date=2019-10-0[1-4]",
[
"mock://top_level/second_level/date=2019-10-01",
"mock://top_level/second_level/date=2019-10-02",
"mock://top_level/second_level/date=2019-10-04",
],
),
(
"mock://",
"date=2019-10-0[1-4]/*",
[
"mock://top_level/second_level/date=2019-10-01/a.parquet",
"mock://top_level/second_level/date=2019-10-01/b.parquet",
"mock://top_level/second_level/date=2019-10-02/a.parquet",
"mock://top_level/second_level/date=2019-10-04/a.parquet",
],
),
(
"mock://top_level",
"date=2019-10-0[1-4]/*",
[
"mock://top_level/second_level/date=2019-10-01/a.parquet",
"mock://top_level/second_level/date=2019-10-01/b.parquet",
"mock://top_level/second_level/date=2019-10-02/a.parquet",
"mock://top_level/second_level/date=2019-10-04/a.parquet",
],
),
],
)
def test_xpath_rglob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec):
if input_path == "tmp_path":
input_path = tmp_path
dir_path = tmp_path / "dir"
dir_path.mkdir()
expected_paths = [dir_path / file for file in expected_paths]
for file in ["file1.txt", "file2.txt", "README.md"]:
(dir_path / file).touch()
else:
expected_paths = [Path(file) for file in expected_paths]
output_paths = sorted(xPath(input_path).rglob(pattern))
assert output_paths == expected_paths
@pytest.mark.parametrize(
"input_path, expected_path",
[
("https://host.com/archive.zip", "https://host.com"),
("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip"),
("zip://dir/file.txt::https://host.com/archive.zip", "zip://dir::https://host.com/archive.zip"),
("file.txt", ""),
(str(Path().resolve() / "file.txt"), str(Path().resolve())),
],
)
def test_xpath_parent(self, input_path, expected_path):
assert xPath(input_path).parent == xPath(expected_path)
@pytest.mark.parametrize(
"input_path, expected",
[
("https://host.com/archive.zip", "archive.zip"),
("zip://file.txt::https://host.com/archive.zip", "file.txt"),
("zip://dir/file.txt::https://host.com/archive.zip", "file.txt"),
("file.txt", "file.txt"),
(str(Path().resolve() / "file.txt"), "file.txt"),
],
)
def test_xpath_name(self, input_path, expected):
assert xPath(input_path).name == expected
@pytest.mark.parametrize(
"input_path, expected",
[
("https://host.com/archive.zip", "archive"),
("zip://file.txt::https://host.com/archive.zip", "file"),
("zip://dir/file.txt::https://host.com/archive.zip", "file"),
("file.txt", "file"),
(str(Path().resolve() / "file.txt"), "file"),
],
)
def test_xpath_stem(self, input_path, expected):
assert xPath(input_path).stem == expected
@pytest.mark.parametrize(
"input_path, expected",
[
("https://host.com/archive.zip", ".zip"),
("zip://file.txt::https://host.com/archive.zip", ".txt"),
("zip://dir/file.txt::https://host.com/archive.zip", ".txt"),
("file.txt", ".txt"),
(str(Path().resolve() / "file.txt"), ".txt"),
],
)
def test_xpath_suffix(self, input_path, expected):
assert xPath(input_path).suffix == expected
@pytest.mark.parametrize(
"input_path, suffix, expected",
[
("https://host.com/archive.zip", ".ann", "https://host.com/archive.ann"),
("zip://file.txt::https://host.com/archive.zip", ".ann", "zip://file.ann::https://host.com/archive.zip"),
(
"zip://dir/file.txt::https://host.com/archive.zip",
".ann",
"zip://dir/file.ann::https://host.com/archive.zip",
),
("file.txt", ".ann", "file.ann"),
(str(Path().resolve() / "file.txt"), ".ann", str(Path().resolve() / "file.ann")),
],
)
def test_xpath_with_suffix(self, input_path, suffix, expected):
assert xPath(input_path).with_suffix(suffix) == xPath(expected)
@pytest.mark.parametrize("urlpath", [r"C:\\foo\bar.txt", "/foo/bar.txt", "https://f.oo/bar.txt"])
def test_streaming_dl_manager_download_dummy_path(urlpath):
dl_manager = StreamingDownloadManager()
assert dl_manager.download(urlpath) == urlpath
def test_streaming_dl_manager_download(text_path):
dl_manager = StreamingDownloadManager()
out = dl_manager.download(text_path)
assert out == text_path
with xopen(out, encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("urlpath", [r"C:\\foo\bar.txt", "/foo/bar.txt", "https://f.oo/bar.txt"])
def test_streaming_dl_manager_download_and_extract_no_extraction(urlpath):
dl_manager = StreamingDownloadManager()
assert dl_manager.download_and_extract(urlpath) == urlpath
def test_streaming_dl_manager_extract(text_gz_path, text_path):
dl_manager = StreamingDownloadManager()
output_path = dl_manager.extract(text_gz_path)
path = os.path.basename(text_gz_path)
path = path[: path.rindex(".")]
assert output_path == f"gzip://{path}::{text_gz_path}"
fsspec_open_file = xopen(output_path, encoding="utf-8")
with fsspec_open_file as f, open(text_path, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
def test_streaming_dl_manager_download_and_extract_with_extraction(text_gz_path, text_path):
dl_manager = StreamingDownloadManager()
output_path = dl_manager.download_and_extract(text_gz_path)
path = os.path.basename(text_gz_path)
path = path[: path.rindex(".")]
assert output_path == f"gzip://{path}::{text_gz_path}"
fsspec_open_file = xopen(output_path, encoding="utf-8")
with fsspec_open_file as f, open(text_path, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
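# Note: in streaming mode extract() does not decompress anything on disk; it only rewrites
# the path into an fsspec chained URL that is decompressed lazily when opened with xopen.
# A minimal, illustrative sketch (not itself a test), assuming a hypothetical local file
# "/tmp/data.txt.gz":
#
#   dl_manager = StreamingDownloadManager()
#   dl_manager.extract("/tmp/data.txt.gz")   # -> "gzip://data.txt::/tmp/data.txt.gz"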
@pytest.mark.parametrize(
"input_path, filename, expected_path",
[("https://domain.org/archive.zip", "filename.jsonl", "zip://filename.jsonl::https://domain.org/archive.zip")],
)
def test_streaming_dl_manager_download_and_extract_with_join(input_path, filename, expected_path):
dl_manager = StreamingDownloadManager()
extracted_path = dl_manager.download_and_extract(input_path)
output_path = xjoin(extracted_path, filename)
assert output_path == expected_path
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_streaming_dl_manager_extract_all_supported_single_file_compression_types(
compression_fs_class, gz_file, xz_file, zstd_file, bz2_file, lz4_file, text_file
):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
dl_manager = StreamingDownloadManager()
output_path = dl_manager.extract(input_path)
path = os.path.basename(input_path)
path = path[: path.rindex(".")]
assert output_path == f"{compression_fs_class.protocol}://{path}::{input_path}"
fsspec_open_file = xopen(output_path, encoding="utf-8")
with fsspec_open_file as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize(
"urlpath, expected_protocol",
[
("zip://train-00000.json.gz::https://foo.bar/data.zip", "gzip"),
("https://foo.bar/train.json.gz?dl=1", "gzip"),
("http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-ku.txt.zip", "zip"),
("https://github.com/user/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true", "zip"),
("https://github.com/user/repo/blob/master/data/morph_train.tsv?raw=true", None),
("https://repo.org/bitstream/handle/20.500.12185/346/annotated_corpus.zip?sequence=3&isAllowed=y", "zip"),
("https://zenodo.org/record/2787612/files/SICK.zip?download=1", "zip"),
],
)
def test_streaming_dl_manager_get_extraction_protocol(urlpath, expected_protocol):
assert _get_extraction_protocol(urlpath) == expected_protocol
@pytest.mark.parametrize(
"urlpath, expected_protocol",
[
(TEST_GG_DRIVE_GZIPPED_URL, "gzip"),
(TEST_GG_DRIVE_ZIPPED_URL, "zip"),
],
)
@slow # otherwise it spams Google Drive and the CI gets banned
def test_streaming_dl_manager_get_extraction_protocol_gg_drive(urlpath, expected_protocol):
assert _get_extraction_protocol(urlpath) == expected_protocol
@pytest.mark.parametrize(
"urlpath",
[
"zip://train-00000.tar.gz::https://foo.bar/data.zip",
"https://foo.bar/train.tar.gz",
"https://foo.bar/train.tgz",
"https://foo.bar/train.tar",
],
)
def test_streaming_dl_manager_extract_throws(urlpath):
with pytest.raises(NotImplementedError):
_ = StreamingDownloadManager().extract(urlpath)
@slow # otherwise it spams Google Drive and the CI gets banned
@pytest.mark.integration
def test_streaming_gg_drive():
with xopen(TEST_GG_DRIVE_URL) as f:
assert f.read() == TEST_GG_DRIVE_CONTENT
@slow # otherwise it spams Google Drive and the CI gets banned
@pytest.mark.integration
def test_streaming_gg_drive_no_extract():
urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_URL)
with xopen(urlpath) as f:
assert f.read() == TEST_GG_DRIVE_CONTENT
@slow # otherwise it spams Google Drive and the CI gets banned
@pytest.mark.integration
def test_streaming_gg_drive_gzipped():
urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_GZIPPED_URL)
with xopen(urlpath) as f:
assert f.read() == TEST_GG_DRIVE_CONTENT
@slow # otherwise it spams Google Drive and the CI gets banned
@pytest.mark.integration
def test_streaming_gg_drive_zipped():
urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_ZIPPED_URL)
all_files = list(xglob(xjoin(urlpath, "*")))
assert len(all_files) == 1
assert xbasename(all_files[0]) == TEST_GG_DRIVE_FILENAME
with xopen(all_files[0]) as f:
assert f.read() == TEST_GG_DRIVE_CONTENT
def _test_jsonl(path, file):
assert path.endswith(".jsonl")
for num_items, line in enumerate(file, start=1):
item = json.loads(line.decode("utf-8"))
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
archive_jsonl_path = request.getfixturevalue(archive_jsonl)
dl_manager = StreamingDownloadManager()
archive_iterable = dl_manager.iter_archive(archive_jsonl_path)
num_jsonl = 0
for num_jsonl, (path, file) in enumerate(archive_iterable, start=1):
_test_jsonl(path, file)
assert num_jsonl == 2
# do it twice to make sure it's reset correctly
num_jsonl = 0
for num_jsonl, (path, file) in enumerate(archive_iterable, start=1):
_test_jsonl(path, file)
assert num_jsonl == 2
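# Note: iter_archive yields (member path, file object) pairs without extracting the archive,
# and the returned iterable can be consumed more than once, which is what the second loop
# above re-checks. A hypothetical sketch (not itself a test) of how a dataset script
# typically consumes it:
#
#   for path, f in dl_manager.iter_archive("archive.tar"):
#       if path.endswith(".jsonl"):
#           for line in f:
#               record = json.loads(line.decode("utf-8"))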
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
dl_manager = StreamingDownloadManager()
files_iterable = dl_manager.iter_archive(archive_nested_jsonl_path)
num_tar, num_jsonl = 0, 0
for num_tar, (path, file) in enumerate(files_iterable, start=1):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
_test_jsonl(subpath, subfile)
assert num_tar == 1
assert num_jsonl == 2
# do it twice to make sure it's reset correctly
num_tar, num_jsonl = 0, 0
for num_tar, (path, file) in enumerate(files_iterable, start=1):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
_test_jsonl(subpath, subfile)
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
dl_manager = StreamingDownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
def test_xnumpy_load(tmp_path):
import numpy as np
expected_x = np.arange(10)
npy_path = tmp_path / "data-x.npy"
np.save(npy_path, expected_x)
x = xnumpy_load(npy_path)
assert np.array_equal(x, expected_x)
npz_path = tmp_path / "data.npz"
np.savez(npz_path, x=expected_x)
with xnumpy_load(npz_path) as f:
x = f["x"]
assert np.array_equal(x, expected_x)
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_filesystem.py | import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
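# Note: each compression filesystem exposes a single-file archive as a one-entry fsspec
# filesystem whose member name is the archive name without its compression extension.
# A minimal, illustrative sketch (not itself a test), assuming a hypothetical "corpus.txt.gz":
#
#   fs = fsspec.filesystem("gzip", fo="corpus.txt.gz")
#   fs.glob("*")                                   # ["corpus.txt"]
#   with fs.open("corpus.txt", "r", encoding="utf-8") as f:
#       text = f.read()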
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
def test_fs_overwrites():
protocol = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(protocol, None, clobber=True)
with pytest.warns(UserWarning) as warning_info:
importlib.reload(datasets.filesystems)
assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_patching.py | from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
mock = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj)
assert isinstance(_test_patching.os.path, _PatchedModuleObj)
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj)
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
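# Note: patch_submodule temporarily replaces an attribute reachable through a module's
# globals, covering direct, dotted and renamed access paths alike, as the assertions above
# show. A minimal, illustrative sketch (not itself a test):
#
#   with patch_submodule(_test_patching, "os.path.join", "<mock>"):
#       assert _test_patching.os.path.join == "<mock>"
#   # on exit, _test_patching.os.path.join is the original os.path.join again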
def test_patch_submodule_builtin():
assert _test_patching.open is open
mock = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
# pandas.read_csv is not present in _test_patching
mock = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    # builtins should always be mocked even if they're not in the globals,
    # in case they're loaded at some point
mock = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, "len", None) is None
with patch_submodule(_test_patching, "len", mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
mock = "__test_patch_submodule_start_and_stop_mock__"
patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
mock_join = "__test_patch_submodule_successive_join__"
mock_dirname = "__test_patch_submodule_successive_dirname__"
mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, "os.path.join", mock_join):
with patch_submodule(_test_patching, "os.rename", mock_rename):
with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, "os.rename", mock_rename):
with patch_submodule(_test_patching, "os.path.join", mock_join):
with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
mock = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
pass
with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_metric.py | import os
import pickle
import tempfile
import time
from multiprocessing import Pool
from unittest import TestCase
import pytest
from datasets.features import Features, Sequence, Value
from datasets.metric import Metric, MetricInfo
from .utils import require_tf, require_torch
class DummyMetric(Metric):
def _info(self):
return MetricInfo(
description="dummy metric for tests",
citation="insert citation here",
features=Features({"predictions": Value("int64"), "references": Value("int64")}),
)
def _compute(self, predictions, references):
return (
{
"accuracy": sum(i == j for i, j in zip(predictions, references)) / len(predictions),
"set_equality": set(predictions) == set(references),
}
if predictions
else {}
)
@classmethod
def predictions_and_references(cls):
return ([1, 2, 3, 4], [1, 2, 4, 3])
@classmethod
def expected_results(cls):
return {"accuracy": 0.5, "set_equality": True}
@classmethod
def other_predictions_and_references(cls):
return ([1, 3, 4, 5], [1, 2, 3, 4])
@classmethod
def other_expected_results(cls):
return {"accuracy": 0.25, "set_equality": False}
@classmethod
def distributed_predictions_and_references(cls):
return ([1, 2, 3, 4], [1, 2, 3, 4]), ([1, 2, 4, 5], [1, 2, 3, 4])
@classmethod
def distributed_expected_results(cls):
return {"accuracy": 0.75, "set_equality": False}
@classmethod
def separate_predictions_and_references(cls):
return ([1, 2, 3, 4], [1, 2, 3, 4]), ([1, 2, 4, 5], [1, 2, 3, 4])
@classmethod
def separate_expected_results(cls):
return [{"accuracy": 1.0, "set_equality": True}, {"accuracy": 0.5, "set_equality": False}]
def properly_del_metric(metric):
"""properly delete a metric on windows if the process is killed during multiprocessing"""
if metric is not None:
if metric.filelock is not None:
metric.filelock.release()
if metric.rendez_vous_lock is not None:
metric.rendez_vous_lock.release()
del metric.writer
del metric.data
del metric
def metric_compute(arg):
"""Thread worker function for distributed evaluation testing.
On base level to be pickable.
"""
metric = None
try:
num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg
metric = DummyMetric(
num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5
)
time.sleep(wait)
results = metric.compute(predictions=preds, references=refs)
return results
finally:
properly_del_metric(metric)
def metric_add_batch_and_compute(arg):
"""Thread worker function for distributed evaluation testing.
On base level to be pickable.
"""
metric = None
try:
num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg
metric = DummyMetric(
num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5
)
metric.add_batch(predictions=preds, references=refs)
time.sleep(wait)
results = metric.compute()
return results
finally:
properly_del_metric(metric)
def metric_add_and_compute(arg):
"""Thread worker function for distributed evaluation testing.
On base level to be pickable.
"""
metric = None
try:
num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg
metric = DummyMetric(
num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5
)
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
time.sleep(wait)
results = metric.compute()
return results
finally:
properly_del_metric(metric)
@pytest.mark.filterwarnings("ignore:Metric is deprecated:FutureWarning")
class TestMetric(TestCase):
def test_dummy_metric(self):
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
metric = DummyMetric(experiment_id="test_dummy_metric")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_dummy_metric")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_dummy_metric")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
# With keep_in_memory
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
self.assertDictEqual({}, metric.compute(predictions=[], references=[]))
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
with self.assertRaisesRegex(ValueError, "Mismatch in the number"):
metric.add_batch(predictions=[1, 2, 3], references=[1, 2, 3, 4])
del metric
def test_metric_with_cache_dir(self):
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
with tempfile.TemporaryDirectory() as tmp_dir:
metric = DummyMetric(experiment_id="test_dummy_metric", cache_dir=tmp_dir)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
def test_concurrent_metrics(self):
preds, refs = DummyMetric.predictions_and_references()
other_preds, other_refs = DummyMetric.other_predictions_and_references()
expected_results = DummyMetric.expected_results()
other_expected_results = DummyMetric.other_expected_results()
metric = DummyMetric(experiment_id="test_concurrent_metrics")
other_metric = DummyMetric(
experiment_id="test_concurrent_metrics",
)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
self.assertDictEqual(
other_expected_results, other_metric.compute(predictions=other_preds, references=other_refs)
)
del metric, other_metric
metric = DummyMetric(
experiment_id="test_concurrent_metrics",
)
other_metric = DummyMetric(
experiment_id="test_concurrent_metrics",
)
metric.add_batch(predictions=preds, references=refs)
other_metric.add_batch(predictions=other_preds, references=other_refs)
self.assertDictEqual(expected_results, metric.compute())
self.assertDictEqual(other_expected_results, other_metric.compute())
for pred, ref, other_pred, other_ref in zip(preds, refs, other_preds, other_refs):
metric.add(prediction=pred, reference=ref)
other_metric.add(prediction=other_pred, reference=other_ref)
self.assertDictEqual(expected_results, metric.compute())
self.assertDictEqual(other_expected_results, other_metric.compute())
del metric, other_metric
# With keep_in_memory
metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
other_metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
self.assertDictEqual(
other_expected_results, other_metric.compute(predictions=other_preds, references=other_refs)
)
metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
other_metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
metric.add_batch(predictions=preds, references=refs)
other_metric.add_batch(predictions=other_preds, references=other_refs)
self.assertDictEqual(expected_results, metric.compute())
self.assertDictEqual(other_expected_results, other_metric.compute())
for pred, ref, other_pred, other_ref in zip(preds, refs, other_preds, other_refs):
metric.add(prediction=pred, reference=ref)
other_metric.add(prediction=other_pred, reference=other_ref)
self.assertDictEqual(expected_results, metric.compute())
self.assertDictEqual(other_expected_results, other_metric.compute())
del metric, other_metric
def test_separate_experiments_in_parallel(self):
with tempfile.TemporaryDirectory() as tmp_dir:
(preds_0, refs_0), (preds_1, refs_1) = DummyMetric.separate_predictions_and_references()
expected_results = DummyMetric.separate_expected_results()
pool = Pool(processes=4)
results = pool.map(
metric_compute,
[
(1, 0, preds_0, refs_0, None, tmp_dir, 0),
(1, 0, preds_1, refs_1, None, tmp_dir, 0),
],
)
self.assertDictEqual(expected_results[0], results[0])
self.assertDictEqual(expected_results[1], results[1])
del results
# more than one sec of waiting so that the second metric has to sample a new hashing name
results = pool.map(
metric_compute,
[
(1, 0, preds_0, refs_0, None, tmp_dir, 2),
(1, 0, preds_1, refs_1, None, tmp_dir, 2),
],
)
self.assertDictEqual(expected_results[0], results[0])
self.assertDictEqual(expected_results[1], results[1])
del results
results = pool.map(
metric_add_and_compute,
[
(1, 0, preds_0, refs_0, None, tmp_dir, 0),
(1, 0, preds_1, refs_1, None, tmp_dir, 0),
],
)
self.assertDictEqual(expected_results[0], results[0])
self.assertDictEqual(expected_results[1], results[1])
del results
results = pool.map(
metric_add_batch_and_compute,
[
(1, 0, preds_0, refs_0, None, tmp_dir, 0),
(1, 0, preds_1, refs_1, None, tmp_dir, 0),
],
)
self.assertDictEqual(expected_results[0], results[0])
self.assertDictEqual(expected_results[1], results[1])
del results
def test_distributed_metrics(self):
with tempfile.TemporaryDirectory() as tmp_dir:
(preds_0, refs_0), (preds_1, refs_1) = DummyMetric.distributed_predictions_and_references()
expected_results = DummyMetric.distributed_expected_results()
pool = Pool(processes=4)
results = pool.map(
metric_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_0", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_0", tmp_dir, 0.5),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertIsNone(results[1])
del results
results = pool.map(
metric_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_0", tmp_dir, 0.5),
(2, 1, preds_1, refs_1, "test_distributed_metrics_0", tmp_dir, 0),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertIsNone(results[1])
del results
results = pool.map(
metric_add_and_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_1", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_1", tmp_dir, 0),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertIsNone(results[1])
del results
results = pool.map(
metric_add_batch_and_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_2", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_2", tmp_dir, 0),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertIsNone(results[1])
del results
            # To use several distributed metrics on the same local file system, you need to specify an experiment_id
try:
results = pool.map(
metric_add_and_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_3", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_3", tmp_dir, 0),
(2, 0, preds_0, refs_0, "test_distributed_metrics_3", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_3", tmp_dir, 0),
],
)
except ValueError:
                # We are fine with either raising a ValueError or computing the metric correctly.
                # Making sure the error is raised would mean making the dummy dataset bigger
                # and the test longer...
pass
else:
self.assertDictEqual(expected_results, results[0])
self.assertDictEqual(expected_results, results[2])
self.assertIsNone(results[1])
self.assertIsNone(results[3])
del results
results = pool.map(
metric_add_and_compute,
[
(2, 0, preds_0, refs_0, "exp_0", tmp_dir, 0),
(2, 1, preds_1, refs_1, "exp_0", tmp_dir, 0),
(2, 0, preds_0, refs_0, "exp_1", tmp_dir, 0),
(2, 1, preds_1, refs_1, "exp_1", tmp_dir, 0),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertDictEqual(expected_results, results[2])
self.assertIsNone(results[1])
self.assertIsNone(results[3])
del results
            # Using keep_in_memory is not allowed with distributed metrics
with self.assertRaises(ValueError):
DummyMetric(
experiment_id="test_distributed_metrics_4",
keep_in_memory=True,
num_process=2,
process_id=0,
cache_dir=tmp_dir,
)
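    # Note: the distributed cases above rely on every process creating the metric with the
    # same experiment_id and cache_dir, its own process_id and the total num_process; only
    # process_id 0 returns the merged result, the other processes return None after writing
    # their shard. A minimal, illustrative sketch (not itself a test):
    #
    #   metric = DummyMetric(num_process=2, process_id=rank, experiment_id="exp", cache_dir=d)
    #   result = metric.compute(predictions=preds, references=refs)  # dict on rank 0, else None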
def test_dummy_metric_pickle(self):
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "metric.pt")
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
metric = DummyMetric(experiment_id="test_dummy_metric_pickle")
with open(tmp_file, "wb") as f:
pickle.dump(metric, f)
del metric
with open(tmp_file, "rb") as f:
metric = pickle.load(f)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
def test_input_numpy(self):
import numpy as np
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
preds, refs = np.array(preds), np.array(refs)
metric = DummyMetric(experiment_id="test_input_numpy")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_input_numpy")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_input_numpy")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
@require_torch
def test_input_torch(self):
import torch
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
preds, refs = torch.tensor(preds), torch.tensor(refs)
metric = DummyMetric(experiment_id="test_input_torch")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_input_torch")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_input_torch")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
@require_tf
def test_input_tf(self):
import tensorflow as tf
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
preds, refs = tf.constant(preds), tf.constant(refs)
metric = DummyMetric(experiment_id="test_input_tf")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_input_tf")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_input_tf")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
class MetricWithMultiLabel(Metric):
def _info(self):
return MetricInfo(
description="dummy metric for tests",
citation="insert citation here",
features=Features(
{"predictions": Sequence(Value("int64")), "references": Sequence(Value("int64"))}
if self.config_name == "multilabel"
else {"predictions": Value("int64"), "references": Value("int64")}
),
)
def _compute(self, predictions=None, references=None):
return (
{
"accuracy": sum(i == j for i, j in zip(predictions, references)) / len(predictions),
}
if predictions
else {}
)
@pytest.mark.parametrize(
"config_name, predictions, references, expected",
[
(None, [1, 2, 3, 4], [1, 2, 4, 3], 0.5), # Multiclass: Value("int64")
(
"multilabel",
[[1, 0], [1, 0], [1, 0], [1, 0]],
[[1, 0], [0, 1], [1, 1], [0, 0]],
0.25,
), # Multilabel: Sequence(Value("int64"))
],
)
def test_metric_with_multilabel(config_name, predictions, references, expected, tmp_path):
cache_dir = tmp_path / "cache"
metric = MetricWithMultiLabel(config_name, cache_dir=cache_dir)
results = metric.compute(predictions=predictions, references=references)
assert results["accuracy"] == expected
def test_safety_checks_process_vars():
with pytest.raises(ValueError):
_ = DummyMetric(process_id=-2)
with pytest.raises(ValueError):
_ = DummyMetric(num_process=2, process_id=3)
class AccuracyWithNonStandardFeatureNames(Metric):
def _info(self):
return MetricInfo(
description="dummy metric for tests",
citation="insert citation here",
features=Features({"inputs": Value("int64"), "targets": Value("int64")}),
)
def _compute(self, inputs, targets):
return (
{
"accuracy": sum(i == j for i, j in zip(inputs, targets)) / len(targets),
}
if targets
else {}
)
@classmethod
def inputs_and_targets(cls):
return ([1, 2, 3, 4], [1, 2, 4, 3])
@classmethod
def expected_results(cls):
return {"accuracy": 0.5}
def test_metric_with_non_standard_feature_names_add(tmp_path):
cache_dir = tmp_path / "cache"
inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets()
metric = AccuracyWithNonStandardFeatureNames(cache_dir=cache_dir)
for input, target in zip(inputs, targets):
metric.add(inputs=input, targets=target)
results = metric.compute()
assert results == AccuracyWithNonStandardFeatureNames.expected_results()
def test_metric_with_non_standard_feature_names_add_batch(tmp_path):
cache_dir = tmp_path / "cache"
inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets()
metric = AccuracyWithNonStandardFeatureNames(cache_dir=cache_dir)
metric.add_batch(inputs=inputs, targets=targets)
results = metric.compute()
assert results == AccuracyWithNonStandardFeatureNames.expected_results()
def test_metric_with_non_standard_feature_names_compute(tmp_path):
cache_dir = tmp_path / "cache"
inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets()
metric = AccuracyWithNonStandardFeatureNames(cache_dir=cache_dir)
results = metric.compute(inputs=inputs, targets=targets)
assert results == AccuracyWithNonStandardFeatureNames.expected_results()
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/tests/test_py_utils.py | import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x): # picklable for multiprocessing
return x.sum()
def add_one(i): # picklable for multiprocessing
return i + 1
@dataclass
class A:
x: int
y: str
class PyUtilsTest(TestCase):
def test_map_nested(self):
s1 = {}
s2 = []
s3 = 1
s4 = [1, 2]
s5 = {"a": 1, "b": 2}
s6 = {"a": [1, 2], "b": [3, 4]}
s7 = {"a": {"1": 1}, "b": 2}
s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
expected_map_nested_s1 = {}
expected_map_nested_s2 = []
expected_map_nested_s3 = 2
expected_map_nested_s4 = [2, 3]
expected_map_nested_s5 = {"a": 2, "b": 3}
expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
num_proc = 2
self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
expected_map_nested_sn1_int = {
"a": np.eye(2).astype(int),
"b": np.zeros(3).astype(int),
"c": np.ones(2).astype(int),
}
self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
{k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
)
self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
{k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
)
with self.assertRaises(AttributeError): # can't pickle a local lambda
map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
def test_zip_dict(self):
d1 = {"a": 1, "b": 2}
d2 = {"a": 3, "b": 4}
d3 = {"a": 5, "b": 6}
expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
def test_temporary_assignment(self):
class Foo:
my_attr = "bar"
foo = Foo()
self.assertEqual(foo.my_attr, "bar")
with temporary_assignment(foo, "my_attr", "BAR"):
self.assertEqual(foo.my_attr, "BAR")
self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc",
[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool"
) as mock_multiprocessing_pool:
data_struct = {f"{i}": i for i in range(iterable_length)}
_ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
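# Note: map_nested applies a function to every leaf of nested lists/dicts and only spawns a
# multiprocessing pool when num_proc > 1 and the structure has at least parallel_min_length
# top-level items, which is what the mock-based assertions above check. A minimal,
# illustrative sketch (not itself a test):
#
#   map_nested(add_one, {"a": [1, 2], "b": [3, 4]})   # {"a": [2, 3], "b": [4, 5]}
#   map_nested(add_one, {str(i): i for i in range(32)}, num_proc=4, parallel_min_length=16)  # uses a pool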
class TempSeedTest(TestCase):
@require_tf
def test_tensorflow(self):
import tensorflow as tf
from tensorflow.keras import layers
model = layers.Dense(2)
def gen_random_output():
x = tf.random.uniform((1, 3))
return model(x).numpy()
with temp_seed(42, set_tensorflow=True):
out1 = gen_random_output()
with temp_seed(42, set_tensorflow=True):
out2 = gen_random_output()
out3 = gen_random_output()
np.testing.assert_equal(out1, out2)
self.assertGreater(np.abs(out1 - out3).sum(), 0)
@require_torch
def test_torch(self):
import torch
def gen_random_output():
model = torch.nn.Linear(3, 2)
x = torch.rand(1, 3)
return model(x).detach().numpy()
with temp_seed(42, set_pytorch=True):
out1 = gen_random_output()
with temp_seed(42, set_pytorch=True):
out2 = gen_random_output()
out3 = gen_random_output()
np.testing.assert_equal(out1, out2)
self.assertGreater(np.abs(out1 - out3).sum(), 0)
def test_numpy(self):
def gen_random_output():
return np.random.rand(1, 3)
with temp_seed(42):
out1 = gen_random_output()
with temp_seed(42):
out2 = gen_random_output()
out3 = gen_random_output()
np.testing.assert_equal(out1, out2)
self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
output_data = NestedDataStructure(input_data).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output",
[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
],
)
def test_flatten(data, expected_output):
output = NestedDataStructure(data).flatten()
assert output == expected_output
def test_asdict():
input = A(x=1, y="foobar")
expected_output = {"x": 1, "y": "foobar"}
assert asdict(input) == expected_output
input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(input) == expected_output
with pytest.raises(TypeError):
asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
return text.split()
def _2seconds_generator_of_2items_with_timing(content):
yield (time.time(), content)
time.sleep(2)
yield (time.time(), content)
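# iflatmap_unordered maps the function over the pool and flattens the yielded items,
# forwarding each item as soon as any worker produces it (no ordering guarantee).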
def test_iflatmap_unordered():
with Pool(2) as pool:
out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
assert out.count("hello") == 10
assert out.count("there") == 10
assert len(out) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2) as pool:
out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
assert out.count("hello") == 10
assert out.count("there") == 10
assert len(out) == 20
# check that we get items as fast as possible
with Pool(2) as pool:
out = []
for yield_time, content in iflatmap_unordered(
pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
):
            assert time.time() - yield_time < 0.1, "we should get each item directly after it was yielded"
out.append(content)
assert out.count("a") == 2
assert out.count("b") == 2
assert len(out) == 4
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/distributed_scripts/run_torch_distributed.py |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
pass
def gen(shards: List[str]):
for shard in shards:
for i in range(NUM_ITEMS_PER_SHARD):
yield {"i": i, "shard": shard}
def main():
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
parser = ArgumentParser()
parser.add_argument("--streaming", type=bool)
parser.add_argument("--local_rank", type=int)
parser.add_argument("--num_workers", type=int, default=0)
args = parser.parse_args()
streaming = args.streaming
num_workers = args.num_workers
gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
if not streaming:
ds = Dataset.from_list(list(ds))
ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
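    # expected share per rank: an even split, with the first (full_size % world_size)
    # ranks taking one extra example when the total is not evenly divisible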
full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
expected_local_size = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
local_size = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
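# Note: this script expects to be started by a distributed launcher that sets the RANK and
# WORLD_SIZE environment variables (e.g. `torchrun --nproc_per_node=2 run_torch_distributed.py`);
# the exact invocation used by the test suite lives outside this file.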
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/io/test_sql.py |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = SqlDatasetReader(
"dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
).read()
_check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
_check_sql_dataset(dataset, expected_features)
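# Helper used below to compare the written SQLite database row by row with the original.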
def iter_sql_file(sqlite_path):
with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
cur = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
with pytest.raises(ValueError):
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/io/test_json.py |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
_check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
expected_features = features.copy()
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
cache_dir = tmp_path / "cache"
dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
_check_json_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
if issubclass(path_type, str):
path = jsonl_path
elif issubclass(path_type, list):
path = [jsonl_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
_check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
if split:
path = {split: jsonl_path}
else:
split = "train"
path = {"train": jsonl_path, "test": jsonl_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
_check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
return json.load(buffer)
def load_json_lines(buffer):
return [json.loads(line) for line in buffer]
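# With lines=True the writer emits JSON Lines (one object per line, parsed by load_json_lines);
# with lines=False it emits a single JSON document whose layout is controlled by `orient`.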
class TestJsonDatasetWriter:
@pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, lines=lines).write()
buffer.seek(0)
exported_content = load_json_function(buffer)
assert isinstance(exported_content, list)
assert isinstance(exported_content[0], dict)
assert len(exported_content) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at",
[
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789"), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
],
)
def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
buffer.seek(0)
exported_content = load_json(buffer)
assert isinstance(exported_content, container)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(exported_content) == 10
@pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
buffer.seek(0)
exported_content = load_json_function(buffer)
assert isinstance(exported_content, list)
assert isinstance(exported_content[0], dict)
assert len(exported_content) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at",
[
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789"), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
],
)
def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
buffer.seek(0)
exported_content = load_json(buffer)
assert isinstance(exported_content, container)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(exported_content) == 10
def test_dataset_to_json_orient_invalidproc(self, dataset):
with pytest.raises(ValueError):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
original_path = str(shared_datadir / f"test_file.json.{extension}")
JsonDatasetWriter(dataset, path, compression=compression).write()
with fsspec.open(path, "rb", compression="infer") as f:
exported_content = f.read()
with fsspec.open(original_path, "rb", compression="infer") as f:
original_content = f.read()
assert exported_content == original_content
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/io/test_parquet.py |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
_check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
_check_parquet_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
if issubclass(path_type, str):
path = parquet_path
elif issubclass(path_type, list):
path = [parquet_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
_check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = ParquetDatasetReader(
{"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
).read()
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
if split:
path = {split: parquet_path}
else:
split = "train"
path = {"train": parquet_path, "test": parquet_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
_check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
assert writer.write() > 0
pf = pq.ParquetFile(tmp_path / "foo.parquet")
output_table = pf.read()
assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image()})
dataset = Dataset.from_dict(data, features=features)
writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
assert writer.write() > 0
reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
assert dataset.features == reloaded_dataset.features
reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
assert dataset.features == reloaded_iterable_dataset.features
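# get_writer_batch_size returns a smaller Parquet row group size for image/audio features
# (keeping row groups cheap to read at random) and None when no such feature is present.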
@pytest.mark.parametrize(
"feature, expected",
[
(Features({"foo": Value("int32")}), None),
(Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
],
)
def test_get_writer_batch_size(feature, expected):
assert get_writer_batch_size(feature) == expected
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/io/test_text.py |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
_check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
_check_text_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
if issubclass(path_type, str):
path = text_path
elif issubclass(path_type, list):
path = [text_path]
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
_check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
if split:
path = {split: text_path}
else:
split = "train"
path = {"train": text_path, "test": text_path}
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
_check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/io/test_csv.py |
import csv
import os
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.csv import CsvDatasetReader, CsvDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_csv_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_csv_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_csv_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = CsvDatasetReader(csv_path, features=features, cache_dir=cache_dir).read()
_check_csv_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_csv_split(split, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, split=split).read()
_check_csv_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_csv_path_type(path_type, csv_path, tmp_path):
if issubclass(path_type, str):
path = csv_path
elif issubclass(path_type, list):
path = [csv_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = CsvDatasetReader(path, cache_dir=cache_dir).read()
_check_csv_dataset(dataset, expected_features)
def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_csv_datasetdict_reader_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_csv_datasetdict_reader_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = CsvDatasetReader({"train": csv_path}, features=features, cache_dir=cache_dir).read()
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_csv_datasetdict_reader_split(split, csv_path, tmp_path):
if split:
path = {split: csv_path}
else:
path = {"train": csv_path, "test": csv_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = CsvDatasetReader(path, cache_dir=cache_dir).read()
_check_csv_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def iter_csv_file(csv_path):
with open(csv_path, encoding="utf-8") as csvfile:
yield from csv.reader(csvfile)
def test_dataset_to_csv(csv_path, tmp_path):
cache_dir = tmp_path / "cache"
output_csv = os.path.join(cache_dir, "tmp.csv")
dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read()
CsvDatasetWriter(dataset["train"], output_csv, num_proc=1).write()
original_csv = iter_csv_file(csv_path)
expected_csv = iter_csv_file(output_csv)
for row1, row2 in zip(original_csv, expected_csv):
assert row1 == row2
def test_dataset_to_csv_multiproc(csv_path, tmp_path):
cache_dir = tmp_path / "cache"
output_csv = os.path.join(cache_dir, "tmp.csv")
dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read()
CsvDatasetWriter(dataset["train"], output_csv, num_proc=2).write()
original_csv = iter_csv_file(csv_path)
expected_csv = iter_csv_file(output_csv)
for row1, row2 in zip(original_csv, expected_csv):
assert row1 == row2
def test_dataset_to_csv_invalidproc(csv_path, tmp_path):
cache_dir = tmp_path / "cache"
output_csv = os.path.join(cache_dir, "tmp.csv")
dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read()
with pytest.raises(ValueError):
CsvDatasetWriter(dataset["train"], output_csv, num_proc=0)
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/commands/test_test.py |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
return (abs(source - target) / target) < 0.01
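# num_bytes values can vary slightly across platforms and serialization details, so the
# test below only requires them to be within 1% of the reference values.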
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
test_command = TestCommand(*args)
test_command.run()
dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
assert os.path.exists(dataset_readme_path)
dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string")),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
),
"langs": Sequence(Value("string")),
"spans": Sequence(Value("string")),
}
),
splits=[
{
"name": "train",
"num_bytes": 2351563,
"num_examples": 10000,
},
{
"name": "validation",
"num_bytes": 238418,
"num_examples": 1000,
},
],
download_size=3940680,
dataset_size=2589981,
)
}
)
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
if key == "num_bytes":
assert is_1percent_close(result, expected)
elif key == "splits":
assert list(result) == list(expected)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
else:
            assert result == expected
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/commands/conftest.py |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
script_name = dataset_loading_script_name
script_dir = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=True)
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(dataset_loading_script_code)
return str(script_dir)
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/features/test_array_xd.py |
import os
import random
import tempfile
import unittest
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from absl.testing import parameterized
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features import Array2D, Array3D, Array4D, Array5D, Value
from datasets.features.features import Array3DExtensionType, PandasArrayExtensionDtype, _ArrayXD
from datasets.formatting.formatting import NumpyArrowExtractor, SimpleArrowExtractor
SHAPE_TEST_1 = (30, 487)
SHAPE_TEST_2 = (36, 1024)
SHAPE_TEST_3 = (None, 100)
SPEED_TEST_SHAPE = (100, 100)
SPEED_TEST_N_EXAMPLES = 100
DEFAULT_FEATURES = datasets.Features(
{
"text": Array2D(SHAPE_TEST_1, dtype="float32"),
"image": Array2D(SHAPE_TEST_2, dtype="float32"),
"dynamic": Array2D(SHAPE_TEST_3, dtype="float32"),
}
)
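# Builds (key, example) pairs of random data matching the given features; the "dynamic"
# column gets a random first dimension to exercise shapes with a None dim.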
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
dummy_data = []
seq_shapes = seq_shapes or {}
for i in range(num_examples):
example = {}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(v, _ArrayXD):
if k == "dynamic":
first_dim = random.randint(1, 3)
data = np.random.rand(first_dim, *v.shape[1:]).astype(v.dtype)
else:
data = np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(v, datasets.Value):
data = "foo"
elif isinstance(v, datasets.Sequence):
while isinstance(v, datasets.Sequence):
v = v.feature
shape = seq_shapes[k]
data = np.random.rand(*shape).astype(v.dtype)
example[k] = data
dummy_data.append((i, example))
return dummy_data
class ExtensionTypeCompatibilityTest(unittest.TestCase):
def test_array2d_nonspecific_shape(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(
features=my_features,
num_examples=1,
):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
dataset.set_format("numpy")
row = dataset[0]
first_shape = row["image"].shape
second_shape = row["text"].shape
            self.assertTrue(first_shape is not None and second_shape is not None, "need at least 2 different shapes")
self.assertEqual(len(first_shape), len(second_shape), "both shapes are supposed to be equal length")
self.assertNotEqual(first_shape, second_shape, "shapes must not be the same")
del dataset
def test_multiple_extensions_same_row(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(features=my_features, num_examples=1):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
dataset.set_format("numpy")
row = dataset[0]
first_len = len(row["image"].shape)
second_len = len(row["text"].shape)
third_len = len(row["dynamic"].shape)
self.assertEqual(first_len, 2, "use a sequence type if dim is < 2")
self.assertEqual(second_len, 2, "use a sequence type if dim is < 2")
self.assertEqual(third_len, 2, "use a sequence type if dim is < 2")
del dataset
    def test_compatibility_with_string_values(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
my_features["image_id"] = datasets.Value("string")
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(features=my_features, num_examples=1):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
self.assertIsInstance(dataset[0]["image_id"], str, "image id must be of type string")
del dataset
def test_extension_indexing(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
my_features["explicit_ext"] = Array2D((3, 3), dtype="float32")
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(features=my_features, num_examples=1):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
dataset.set_format("numpy")
data = dataset[0]["explicit_ext"]
self.assertIsInstance(data, np.ndarray, "indexed extension must return numpy.ndarray")
del dataset
def get_array_feature_types():
shape_1 = [3] * 5
shape_2 = [3, 4, 5, 6, 7]
return [
{
"testcase_name": f"{d}d",
"array_feature": array_feature,
"shape_1": tuple(shape_1[:d]),
"shape_2": tuple(shape_2[:d]),
}
for d, array_feature in zip(range(2, 6), [Array2D, Array3D, Array4D, Array5D])
]
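# Runs every test in ArrayXDTest against Array2D through Array5D with matching fixed shapes.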
@parameterized.named_parameters(get_array_feature_types())
class ArrayXDTest(unittest.TestCase):
def get_features(self, array_feature, shape_1, shape_2):
return datasets.Features(
{
"image": array_feature(shape_1, dtype="float32"),
"source": Value("string"),
"matrix": array_feature(shape_2, dtype="float32"),
}
)
def get_dict_example_0(self, shape_1, shape_2):
return {
"image": np.random.rand(*shape_1).astype("float32"),
"source": "foo",
"matrix": np.random.rand(*shape_2).astype("float32"),
}
def get_dict_example_1(self, shape_1, shape_2):
return {
"image": np.random.rand(*shape_1).astype("float32"),
"matrix": np.random.rand(*shape_2).astype("float32"),
"source": "bar",
}
def get_dict_examples(self, shape_1, shape_2):
return {
"image": np.random.rand(2, *shape_1).astype("float32").tolist(),
"source": ["foo", "bar"],
"matrix": np.random.rand(2, *shape_2).astype("float32").tolist(),
}
def _check_getitem_output_type(self, dataset, shape_1, shape_2, first_matrix):
matrix_column = dataset["matrix"]
self.assertIsInstance(matrix_column, list)
self.assertIsInstance(matrix_column[0], list)
self.assertIsInstance(matrix_column[0][0], list)
self.assertTupleEqual(np.array(matrix_column).shape, (2, *shape_2))
matrix_field_of_first_example = dataset[0]["matrix"]
        self.assertIsInstance(matrix_field_of_first_example, list)
        self.assertIsInstance(matrix_field_of_first_example[0], list)
self.assertEqual(np.array(matrix_field_of_first_example).shape, shape_2)
np.testing.assert_array_equal(np.array(matrix_field_of_first_example), np.array(first_matrix))
matrix_field_of_first_two_examples = dataset[:2]["matrix"]
self.assertIsInstance(matrix_field_of_first_two_examples, list)
self.assertIsInstance(matrix_field_of_first_two_examples[0], list)
self.assertIsInstance(matrix_field_of_first_two_examples[0][0], list)
self.assertTupleEqual(np.array(matrix_field_of_first_two_examples).shape, (2, *shape_2))
with dataset.formatted_as("numpy"):
self.assertTupleEqual(dataset["matrix"].shape, (2, *shape_2))
self.assertEqual(dataset[0]["matrix"].shape, shape_2)
self.assertTupleEqual(dataset[:2]["matrix"].shape, (2, *shape_2))
with dataset.formatted_as("pandas"):
self.assertIsInstance(dataset["matrix"], pd.Series)
self.assertIsInstance(dataset[0]["matrix"], pd.Series)
self.assertIsInstance(dataset[:2]["matrix"], pd.Series)
self.assertTupleEqual(dataset["matrix"].to_numpy().shape, (2, *shape_2))
self.assertTupleEqual(dataset[0]["matrix"].to_numpy().shape, (1, *shape_2))
self.assertTupleEqual(dataset[:2]["matrix"].to_numpy().shape, (2, *shape_2))
def test_write(self, array_feature, shape_1, shape_2):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = self.get_features(array_feature, shape_1, shape_2)
my_examples = [
(0, self.get_dict_example_0(shape_1, shape_2)),
(1, self.get_dict_example_1(shape_1, shape_2)),
]
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in my_examples:
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
self._check_getitem_output_type(dataset, shape_1, shape_2, my_examples[0][1]["matrix"])
del dataset
def test_write_batch(self, array_feature, shape_1, shape_2):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = self.get_features(array_feature, shape_1, shape_2)
dict_examples = self.get_dict_examples(shape_1, shape_2)
dict_examples = my_features.encode_batch(dict_examples)
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
writer.write_batch(dict_examples)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
self._check_getitem_output_type(dataset, shape_1, shape_2, dict_examples["matrix"][0])
del dataset
def test_from_dict(self, array_feature, shape_1, shape_2):
dict_examples = self.get_dict_examples(shape_1, shape_2)
dataset = datasets.Dataset.from_dict(
dict_examples, features=self.get_features(array_feature, shape_1, shape_2)
)
self._check_getitem_output_type(dataset, shape_1, shape_2, dict_examples["matrix"][0])
del dataset
class ArrayXDDynamicTest(unittest.TestCase):
def get_one_col_dataset(self, first_dim_list, fixed_shape):
features = datasets.Features({"image": Array3D(shape=(None, *fixed_shape), dtype="float32")})
dict_values = {"image": [np.random.rand(fdim, *fixed_shape).astype("float32") for fdim in first_dim_list]}
dataset = datasets.Dataset.from_dict(dict_values, features=features)
return dataset
    def get_two_col_dataset(self, first_dim_list, fixed_shape):
features = datasets.Features(
{"image": Array3D(shape=(None, *fixed_shape), dtype="float32"), "text": Value("string")}
)
dict_values = {
"image": [np.random.rand(fdim, *fixed_shape).astype("float32") for fdim in first_dim_list],
"text": ["text" for _ in first_dim_list],
}
dataset = datasets.Dataset.from_dict(dict_values, features=features)
return dataset
def test_to_pylist(self):
fixed_shape = (2, 2)
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
arr_xd = SimpleArrowExtractor().extract_column(dataset._data)
self.assertIsInstance(arr_xd.type, Array3DExtensionType)
pylist = arr_xd.to_pylist()
for first_dim, single_arr in zip(first_dim_list, pylist):
self.assertIsInstance(single_arr, list)
self.assertTupleEqual(np.array(single_arr).shape, (first_dim, *fixed_shape))
def test_to_numpy(self):
fixed_shape = (2, 2)
# ragged
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
arr_xd = SimpleArrowExtractor().extract_column(dataset._data)
self.assertIsInstance(arr_xd.type, Array3DExtensionType)
        # replace with arr_xd = arr_xd.combine_chunks() once PyArrow 12.0.0 is the minimum required version
arr_xd = arr_xd.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in arr_xd.chunks]))
numpy_arr = arr_xd.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
# non-ragged
first_dim_list = [4, 4, 4]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
arr_xd = SimpleArrowExtractor().extract_column(dataset._data)
self.assertIsInstance(arr_xd.type, Array3DExtensionType)
        # replace with arr_xd = arr_xd.combine_chunks() once PyArrow 12.0.0 is the minimum required version
arr_xd = arr_xd.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in arr_xd.chunks]))
numpy_arr = arr_xd.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertNotEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
def test_iter_dataset(self):
fixed_shape = (2, 2)
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
for first_dim, ds_row in zip(first_dim_list, dataset):
single_arr = ds_row["image"]
self.assertIsInstance(single_arr, list)
self.assertTupleEqual(np.array(single_arr).shape, (first_dim, *fixed_shape))
def test_to_pandas(self):
fixed_shape = (2, 2)
# ragged
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
df = dataset.to_pandas()
self.assertEqual(type(df.image.dtype), PandasArrayExtensionDtype)
numpy_arr = df.image.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
# non-ragged
first_dim_list = [4, 4, 4]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
df = dataset.to_pandas()
self.assertEqual(type(df.image.dtype), PandasArrayExtensionDtype)
numpy_arr = df.image.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertNotEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
def test_map_dataset(self):
fixed_shape = (2, 2)
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
dataset = dataset.map(lambda a: {"image": np.concatenate([a] * 2)}, input_columns="image")
        # also check that the map function above doubled the first dimension
for first_dim, ds_row in zip(first_dim_list, dataset):
single_arr = ds_row["image"]
self.assertIsInstance(single_arr, list)
self.assertTupleEqual(np.array(single_arr).shape, (first_dim * 2, *fixed_shape))
@pytest.mark.parametrize("dtype, dummy_value", [("int32", 1), ("bool", True), ("float64", 1)])
def test_table_to_pandas(dtype, dummy_value):
features = datasets.Features({"foo": datasets.Array2D(dtype=dtype, shape=(2, 2))})
dataset = datasets.Dataset.from_dict({"foo": [[[dummy_value] * 2] * 2]}, features=features)
df = dataset._data.to_pandas()
assert type(df.foo.dtype) == PandasArrayExtensionDtype
arr = df.foo.to_numpy()
np.testing.assert_equal(arr, np.array([[[dummy_value] * 2] * 2], dtype=np.dtype(dtype)))
@pytest.mark.parametrize("dtype, dummy_value", [("int32", 1), ("bool", True), ("float64", 1)])
def test_array_xd_numpy_arrow_extractor(dtype, dummy_value):
features = datasets.Features({"foo": datasets.Array2D(dtype=dtype, shape=(2, 2))})
dataset = datasets.Dataset.from_dict({"foo": [[[dummy_value] * 2] * 2]}, features=features)
arr = NumpyArrowExtractor().extract_column(dataset._data)
assert isinstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array([[[dummy_value] * 2] * 2], dtype=np.dtype(dtype)))
def test_array_xd_with_none():
# Fixed shape
features = datasets.Features({"foo": datasets.Array2D(dtype="int32", shape=(2, 2))})
dummy_array = np.array([[1, 2], [3, 4]], dtype="int32")
dataset = datasets.Dataset.from_dict({"foo": [dummy_array, None, dummy_array, None]}, features=features)
arr = NumpyArrowExtractor().extract_column(dataset._data)
assert isinstance(arr, np.ndarray) and arr.dtype == np.float64 and arr.shape == (4, 2, 2)
assert np.allclose(arr[0], dummy_array) and np.allclose(arr[2], dummy_array)
assert np.all(np.isnan(arr[1])) and np.all(np.isnan(arr[3])) # broadcasted np.nan - use np.all
# Dynamic shape
features = datasets.Features({"foo": datasets.Array2D(dtype="int32", shape=(None, 2))})
dummy_array = np.array([[1, 2], [3, 4]], dtype="int32")
dataset = datasets.Dataset.from_dict({"foo": [dummy_array, None, dummy_array, None]}, features=features)
arr = NumpyArrowExtractor().extract_column(dataset._data)
assert isinstance(arr, np.ndarray) and arr.dtype == object and arr.shape == (4,)
np.testing.assert_equal(arr[0], dummy_array)
np.testing.assert_equal(arr[2], dummy_array)
assert np.isnan(arr[1]) and np.isnan(arr[3]) # a single np.nan value - np.all not needed
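# ArrayXD features can also be wrapped in Sequence (or nested Sequence); the test below
# checks the round-trip for every supported dtype and both 2D and 3D shapes.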
@pytest.mark.parametrize("seq_type", ["no_sequence", "sequence", "sequence_of_sequence"])
@pytest.mark.parametrize(
"dtype",
[
"bool",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
],
)
@pytest.mark.parametrize("shape, feature_class", [((2, 3), datasets.Array2D), ((2, 3, 4), datasets.Array3D)])
def test_array_xd_with_np(seq_type, dtype, shape, feature_class):
feature = feature_class(dtype=dtype, shape=shape)
data = np.zeros(shape, dtype=dtype)
expected = data.tolist()
if seq_type == "sequence":
feature = datasets.Sequence(feature)
data = [data]
expected = [expected]
elif seq_type == "sequence_of_sequence":
feature = datasets.Sequence(datasets.Sequence(feature))
data = [[data]]
expected = [[expected]]
ds = datasets.Dataset.from_dict({"col": [data]}, features=datasets.Features({"col": feature}))
assert ds[0]["col"] == expected
@pytest.mark.parametrize("with_none", [False, True])
def test_dataset_map(with_none):
ds = datasets.Dataset.from_dict({"path": ["path1", "path2"]})
def process_data(batch):
batch = {
"image": [
np.array(
[
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[10, 20, 30], [40, 50, 60], [70, 80, 90]],
[[100, 200, 300], [400, 500, 600], [700, 800, 900]],
]
)
for _ in batch["path"]
]
}
if with_none:
batch["image"][0] = None
return batch
features = datasets.Features({"image": Array3D(dtype="int32", shape=(3, 3, 3))})
processed_ds = ds.map(process_data, batched=True, remove_columns=ds.column_names, features=features)
assert processed_ds.shape == (2, 1)
with processed_ds.with_format("numpy") as pds:
for i, example in enumerate(pds):
assert "image" in example
assert isinstance(example["image"], np.ndarray)
assert example["image"].shape == (3, 3, 3)
if with_none and i == 0:
assert np.all(np.isnan(example["image"]))
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/features/test_image.py | import os
import tarfile
import warnings
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from datasets import Dataset, Features, Image, Sequence, Value, concatenate_datasets, load_dataset
from datasets.features.image import encode_np_array, image_to_bytes
from ..utils import require_pil
@pytest.fixture
def tar_jpg_path(shared_datadir, tmp_path_factory):
image_path = str(shared_datadir / "test_image_rgb.jpg")
path = tmp_path_factory.mktemp("data") / "image_data.jpg.tar"
with tarfile.TarFile(path, "w") as f:
f.add(image_path, arcname=os.path.basename(image_path))
return path
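# Helper yielding (member name, file object) pairs from a tar archive, used below to build examples from raw bytes.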
def iter_archive(archive_path):
with tarfile.open(archive_path) as tar:
for tarinfo in tar:
file_path = tarinfo.name
file_obj = tar.extractfile(tarinfo)
yield file_path, file_obj
def test_image_instantiation():
image = Image()
assert image.id is None
assert image.dtype == "PIL.Image.Image"
assert image.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()})
assert image._type == "Image"
def test_image_feature_type_to_arrow():
features = Features({"image": Image()})
assert features.arrow_schema == pa.schema({"image": Image().pa_type})
features = Features({"struct_containing_an_image": {"image": Image()}})
assert features.arrow_schema == pa.schema({"struct_containing_an_image": pa.struct({"image": Image().pa_type})})
features = Features({"sequence_of_images": Sequence(Image())})
assert features.arrow_schema == pa.schema({"sequence_of_images": pa.list_(Image().pa_type)})
@require_pil
@pytest.mark.parametrize(
"build_example",
[
lambda image_path: image_path,
lambda image_path: open(image_path, "rb").read(),
lambda image_path: {"path": image_path},
lambda image_path: {"path": image_path, "bytes": None},
lambda image_path: {"path": image_path, "bytes": open(image_path, "rb").read()},
lambda image_path: {"path": None, "bytes": open(image_path, "rb").read()},
lambda image_path: {"bytes": open(image_path, "rb").read()},
],
)
def test_image_feature_encode_example(shared_datadir, build_example):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
image = Image()
encoded_example = image.encode_example(build_example(image_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = image.decode_example(encoded_example)
assert isinstance(decoded_example, PIL.Image.Image)
@require_pil
def test_image_decode_example(shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
image = Image()
decoded_example = image.decode_example({"path": image_path, "bytes": None})
assert isinstance(decoded_example, PIL.Image.Image)
assert os.path.samefile(decoded_example.filename, image_path)
assert decoded_example.size == (640, 480)
assert decoded_example.mode == "RGB"
with pytest.raises(RuntimeError):
Image(decode=False).decode_example(image_path)
@require_pil
def test_dataset_with_image_feature(shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
assert os.path.samefile(item["image"].filename, image_path)
assert item["image"].format == "JPEG"
assert item["image"].size == (640, 480)
assert item["image"].mode == "RGB"
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"])
assert os.path.samefile(batch["image"][0].filename, image_path)
assert batch["image"][0].format == "JPEG"
assert batch["image"][0].size == (640, 480)
assert batch["image"][0].mode == "RGB"
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column)
assert os.path.samefile(column[0].filename, image_path)
assert column[0].format == "JPEG"
assert column[0].size == (640, 480)
assert column[0].mode == "RGB"
@require_pil
@pytest.mark.parametrize("infer_feature", [False, True])
def test_dataset_with_image_feature_from_pil_image(infer_feature, shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [PIL.Image.open(image_path)]}
features = Features({"image": Image()}) if not infer_feature else None
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
assert os.path.samefile(item["image"].filename, image_path)
assert item["image"].format == "JPEG"
assert item["image"].size == (640, 480)
assert item["image"].mode == "RGB"
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"])
assert os.path.samefile(batch["image"][0].filename, image_path)
assert batch["image"][0].format == "JPEG"
assert batch["image"][0].size == (640, 480)
assert batch["image"][0].mode == "RGB"
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column)
assert os.path.samefile(column[0].filename, image_path)
assert column[0].format == "JPEG"
assert column[0].size == (640, 480)
assert column[0].mode == "RGB"
@require_pil
def test_dataset_with_image_feature_from_np_array():
import PIL.Image
image_array = np.arange(640 * 480, dtype=np.int32).reshape(480, 640)
data = {"image": [image_array]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
np.testing.assert_array_equal(np.array(item["image"]), image_array)
assert item["image"].filename == ""
assert item["image"].format in ["PNG", "TIFF"]
assert item["image"].size == (640, 480)
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"])
np.testing.assert_array_equal(np.array(batch["image"][0]), image_array)
assert batch["image"][0].filename == ""
assert batch["image"][0].format in ["PNG", "TIFF"]
assert batch["image"][0].size == (640, 480)
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column)
np.testing.assert_array_equal(np.array(column[0]), image_array)
assert column[0].filename == ""
assert column[0].format in ["PNG", "TIFF"]
assert column[0].size == (640, 480)
@require_pil
def test_dataset_with_image_feature_tar_jpg(tar_jpg_path):
import PIL.Image
data = {"image": []}
for file_path, file_obj in iter_archive(tar_jpg_path):
data["image"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
assert item["image"].filename == ""
assert item["image"].format == "JPEG"
assert item["image"].size == (640, 480)
assert item["image"].mode == "RGB"
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"])
assert batch["image"][0].filename == ""
assert batch["image"][0].format == "JPEG"
assert batch["image"][0].size == (640, 480)
assert batch["image"][0].mode == "RGB"
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column)
assert column[0].filename == ""
assert column[0].format == "JPEG"
assert column[0].size == (640, 480)
assert column[0].mode == "RGB"
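# None is a valid Image value, both for a top-level column and nested inside Sequence or struct features.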
@require_pil
def test_dataset_with_image_feature_with_none():
data = {"image": [None]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert item["image"] is None
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(item is None for item in batch["image"])
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(item is None for item in column)
    # nested image features (Sequence and struct) also accept None
data = {"images": [[None]]}
features = Features({"images": Sequence(Image())})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"images"}
assert all(i is None for i in item["images"])
data = {"nested": [{"image": None}]}
features = Features({"nested": {"image": Image()}})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"nested"}
assert item["nested"].keys() == {"image"}
assert item["nested"]["image"] is None
@require_pil
@pytest.mark.parametrize(
"build_data",
[
lambda image_path: {"image": [image_path]},
lambda image_path: {"image": [open(image_path, "rb").read()]},
lambda image_path: {"image": [{"path": image_path}]},
lambda image_path: {"image": [{"path": image_path, "bytes": None}]},
lambda image_path: {"image": [{"path": image_path, "bytes": open(image_path, "rb").read()}]},
lambda image_path: {"image": [{"path": None, "bytes": open(image_path, "rb").read()}]},
lambda image_path: {"image": [{"bytes": open(image_path, "rb").read()}]},
],
)
def test_dataset_cast_to_image_features(shared_datadir, build_data):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = build_data(image_path)
dset = Dataset.from_dict(data)
item = dset.cast(Features({"image": Image()}))[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
item = dset.cast_column("image", Image())[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
@require_pil
def test_dataset_concatenate_image_features(shared_datadir):
    # dset1 and dset2 store the image differently (file path vs raw bytes) to make sure both representations can be concatenated
image_path = str(shared_datadir / "test_image_rgb.jpg")
data1 = {"image": [image_path]}
dset1 = Dataset.from_dict(data1, features=Features({"image": Image()}))
data2 = {"image": [{"bytes": open(image_path, "rb").read()}]}
dset2 = Dataset.from_dict(data2, features=Features({"image": Image()}))
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert concatenated_dataset[0]["image"] == dset1[0]["image"]
assert concatenated_dataset[1]["image"] == dset2[0]["image"]
@require_pil
def test_dataset_concatenate_nested_image_features(shared_datadir):
    # dset1 and dset2 store the nested image differently (file path vs raw bytes) to make sure both representations can be concatenated
image_path = str(shared_datadir / "test_image_rgb.jpg")
features = Features({"list_of_structs_of_images": [{"image": Image()}]})
data1 = {"list_of_structs_of_images": [[{"image": image_path}]]}
dset1 = Dataset.from_dict(data1, features=features)
data2 = {"list_of_structs_of_images": [[{"image": {"bytes": open(image_path, "rb").read()}}]]}
dset2 = Dataset.from_dict(data2, features=features)
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert (
concatenated_dataset[0]["list_of_structs_of_images"][0]["image"]
== dset1[0]["list_of_structs_of_images"][0]["image"]
)
assert (
concatenated_dataset[1]["list_of_structs_of_images"][0]["image"]
== dset2[0]["list_of_structs_of_images"][0]["image"]
)
@require_pil
def test_dataset_with_image_feature_map(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path], "caption": ["cats sleeping"]}
features = Features({"image": Image(), "caption": Value("string")})
dset = Dataset.from_dict(data, features=features)
for item in dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption"}
assert item == {"image": {"path": image_path, "bytes": None}, "caption": "cats sleeping"}
    # no decoding: mapping over the caption column only should leave the image undecoded
def process_caption(example):
example["caption"] = "Two " + example["caption"]
return example
processed_dset = dset.map(process_caption)
for item in processed_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption"}
assert item == {"image": {"path": image_path, "bytes": None}, "caption": "Two cats sleeping"}
    # decoding, example-wise map: accessing example["image"] decodes it
def process_image_by_example(example):
example["mode"] = example["image"].mode
return example
decoded_dset = dset.map(process_image_by_example)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption", "mode"}
assert os.path.samefile(item["image"]["path"], image_path)
assert item["caption"] == "cats sleeping"
assert item["mode"] == "RGB"
    # decoding, batched map: accessing batch["image"] decodes every image
def process_image_by_batch(batch):
batch["mode"] = [image.mode for image in batch["image"]]
return batch
decoded_dset = dset.map(process_image_by_batch, batched=True)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption", "mode"}
assert os.path.samefile(item["image"]["path"], image_path)
assert item["caption"] == "cats sleeping"
assert item["mode"] == "RGB"
@require_pil
def test_formatted_dataset_with_image_feature_map(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
pil_image = Image().decode_example({"path": image_path, "bytes": None})
data = {"image": [image_path], "caption": ["cats sleeping"]}
features = Features({"image": Image(), "caption": Value("string")})
dset = Dataset.from_dict(data, features=features)
for item in dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption"}
assert item == {"image": {"path": image_path, "bytes": None}, "caption": "cats sleeping"}
def process_image_by_example(example):
example["num_channels"] = example["image"].shape[-1]
return example
decoded_dset = dset.with_format("numpy").map(process_image_by_example)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption", "num_channels"}
assert item["image"] == encode_np_array(np.array(pil_image))
assert item["caption"] == "cats sleeping"
assert item["num_channels"] == 3
def process_image_by_batch(batch):
batch["num_channels"] = [image.shape[-1] for image in batch["image"]]
return batch
decoded_dset = dset.with_format("numpy").map(process_image_by_batch, batched=True)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption", "num_channels"}
assert item["image"] == encode_np_array(np.array(pil_image))
assert item["caption"] == "cats sleeping"
assert item["num_channels"] == 3
@require_pil
def test_dataset_with_image_feature_map_change_image(shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
pil_image = Image().decode_example({"path": image_path, "bytes": None})
data = {"image": [image_path]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
for item in dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {
"image": {
"bytes": None,
"path": image_path,
}
}
    # the map functions return resized PIL images
def process_image_resize_by_example(example):
example["image"] = example["image"].resize((100, 100))
return example
decoded_dset = dset.map(process_image_resize_by_example)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {"image": {"bytes": image_to_bytes(pil_image.resize((100, 100))), "path": None}}
def process_image_resize_by_batch(batch):
batch["image"] = [image.resize((100, 100)) for image in batch["image"]]
return batch
decoded_dset = dset.map(process_image_resize_by_batch, batched=True)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {"image": {"bytes": image_to_bytes(pil_image.resize((100, 100))), "path": None}}
# return np.ndarray (e.g. when using albumentations)
def process_image_resize_by_example_return_np_array(example):
example["image"] = np.array(example["image"].resize((100, 100)))
return example
decoded_dset = dset.map(process_image_resize_by_example_return_np_array)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {
"image": {
"bytes": image_to_bytes(PIL.Image.fromarray(np.array(pil_image.resize((100, 100))))),
"path": None,
}
}
def process_image_resize_by_batch_return_np_array(batch):
batch["image"] = [np.array(image.resize((100, 100))) for image in batch["image"]]
return batch
decoded_dset = dset.map(process_image_resize_by_batch_return_np_array, batched=True)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {
"image": {
"bytes": image_to_bytes(PIL.Image.fromarray(np.array(pil_image.resize((100, 100))))),
"path": None,
}
}
@require_pil
def test_formatted_dataset_with_image_feature(shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path, image_path]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], np.ndarray)
assert item["image"].shape == (480, 640, 3)
batch = dset[:1]
assert batch.keys() == {"image"}
assert len(batch) == 1
assert isinstance(batch["image"], np.ndarray)
assert batch["image"].shape == (1, 480, 640, 3)
column = dset["image"]
assert len(column) == 2
assert isinstance(column, np.ndarray)
assert column.shape == (2, 480, 640, 3)
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["image"]
assert isinstance(item["image"][0], PIL.Image.Image)
assert os.path.samefile(item["image"][0].filename, image_path)
assert item["image"][0].format == "JPEG"
assert item["image"][0].size == (640, 480)
assert item["image"][0].mode == "RGB"
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["image"]
assert isinstance(batch["image"], pd.Series) and all(
isinstance(item, PIL.Image.Image) for item in batch["image"]
)
assert os.path.samefile(batch["image"][0].filename, image_path)
assert batch["image"][0].format == "JPEG"
assert batch["image"][0].size == (640, 480)
assert batch["image"][0].mode == "RGB"
column = dset["image"]
assert len(column) == 2
assert isinstance(column, pd.Series) and all(isinstance(item, PIL.Image.Image) for item in column)
assert os.path.samefile(column[0].filename, image_path)
assert column[0].format == "JPEG"
assert column[0].size == (640, 480)
assert column[0].mode == "RGB"
# Currently, the JSONL reader doesn't support complex feature types, so we create a temporary dataset script
# to test streaming (without uploading the test dataset to the hub).
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset__"
DATASET_LOADING_SCRIPT_CODE = """
import os
import datasets
from datasets import DatasetInfo, Features, Image, Split, SplitGenerator, Value
class __DummyDataset__(datasets.GeneratorBasedBuilder):
def _info(self) -> DatasetInfo:
return DatasetInfo(features=Features({"image": Image(), "caption": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "train.txt")}),
]
def _generate_examples(self, filepath, **kwargs):
with open(filepath, encoding="utf-8") as f:
for i, line in enumerate(f):
image_path, caption = line.split(",")
yield i, {"image": image_path.strip(), "caption": caption.strip()}
"""
@pytest.fixture
def data_dir(shared_datadir, tmp_path):
data_dir = tmp_path / "dummy_dataset_data"
data_dir.mkdir()
image_path = str(shared_datadir / "test_image_rgb.jpg")
with open(data_dir / "train.txt", "w") as f:
f.write(f"{image_path},Two cats sleeping\n")
return str(data_dir)
@pytest.fixture
def dataset_loading_script_dir(tmp_path):
script_name = DATASET_LOADING_SCRIPT_NAME
script_dir = tmp_path / script_name
script_dir.mkdir()
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(DATASET_LOADING_SCRIPT_CODE)
return str(script_dir)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_with_image_feature(shared_datadir, data_dir, dataset_loading_script_dir, streaming):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
dset = load_dataset(dataset_loading_script_dir, split="train", data_dir=data_dir, streaming=streaming)
item = dset[0] if not streaming else next(iter(dset))
assert item.keys() == {"image", "caption"}
assert isinstance(item["image"], PIL.Image.Image)
assert os.path.samefile(item["image"].filename, image_path)
assert item["image"].format == "JPEG"
assert item["image"].size == (640, 480)
assert item["image"].mode == "RGB"
@require_pil
def test_dataset_with_image_feature_undecoded(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image(decode=False)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert item["image"] == {"path": image_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"image"}
assert len(batch["image"]) == 1
assert batch["image"][0] == {"path": image_path, "bytes": None}
column = dset["image"]
assert len(column) == 1
assert column[0] == {"path": image_path, "bytes": None}
@require_pil
def test_formatted_dataset_with_image_feature_undecoded(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image(decode=False)})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"image"}
assert item["image"] == {"path": image_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"image"}
assert len(batch["image"]) == 1
assert batch["image"][0] == {"path": image_path, "bytes": None}
column = dset["image"]
assert len(column) == 1
assert column[0] == {"path": image_path, "bytes": None}
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["image"]
assert item["image"][0] == {"path": image_path, "bytes": None}
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["image"]
assert batch["image"][0] == {"path": image_path, "bytes": None}
column = dset["image"]
assert len(column) == 1
assert column[0] == {"path": image_path, "bytes": None}
@require_pil
def test_dataset_with_image_feature_map_undecoded(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image(decode=False)})
dset = Dataset.from_dict(data, features=features)
def assert_image_example_undecoded(example):
assert example["image"] == {"path": image_path, "bytes": None}
dset.map(assert_image_example_undecoded)
def assert_image_batch_undecoded(batch):
for image in batch["image"]:
assert image == {"path": image_path, "bytes": None}
dset.map(assert_image_batch_undecoded, batched=True)
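# embed_storage reads the file referenced by `path` and stores its raw bytes in the Arrow storage,
# keeping only the file name as the path.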
@require_pil
def test_image_embed_storage(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
example = {"bytes": None, "path": image_path}
storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()}))
embedded_storage = Image().embed_storage(storage)
embedded_example = embedded_storage.to_pylist()[0]
assert embedded_example == {"bytes": open(image_path, "rb").read(), "path": "test_image_rgb.jpg"}
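# Encoding raw numpy arrays: dtypes with an exact PIL mapping are kept as-is, larger integer dtypes
# are downcast with a UserWarning, and float arrays raise a TypeError.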
@require_pil
@pytest.mark.parametrize(
"array, dtype_cast, expected_image_format",
[
(np.arange(16).reshape(4, 4).astype(np.uint8), "exact_match", "PNG"),
(np.arange(16).reshape(4, 4).astype(np.uint16), "exact_match", "TIFF"),
(np.arange(16).reshape(4, 4).astype(np.int64), "downcast->|i4", "TIFF"),
(np.arange(16).reshape(2, 2, 4).astype(np.uint8), "exact_match", "PNG"),
(np.arange(16).reshape(2, 2, 4), "downcast->|u1", "PNG"),
(np.arange(16).reshape(2, 2, 4).astype(np.float64), "error", None),
],
)
def test_encode_np_array(array, dtype_cast, expected_image_format):
if dtype_cast.startswith("downcast"):
_, dest_dtype = dtype_cast.split("->")
dest_dtype = np.dtype(dest_dtype)
with pytest.warns(UserWarning, match=f"Downcasting array dtype.+{dest_dtype}.+"):
encoded_image = Image().encode_example(array)
elif dtype_cast == "error":
with pytest.raises(TypeError):
Image().encode_example(array)
return
else: # exact_match (no warnings are raised)
with warnings.catch_warnings():
warnings.simplefilter("error")
encoded_image = Image().encode_example(array)
assert isinstance(encoded_image, dict)
assert encoded_image.keys() == {"path", "bytes"}
assert encoded_image["path"] is None
assert encoded_image["bytes"] is not None and isinstance(encoded_image["bytes"], bytes)
decoded_image = Image().decode_example(encoded_image)
assert decoded_image.format == expected_image_format
np.testing.assert_array_equal(np.array(decoded_image), array)
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/features/test_audio.py | import os
import tarfile
import pyarrow as pa
import pytest
from datasets import Dataset, concatenate_datasets, load_dataset
from datasets.features import Audio, Features, Sequence, Value
from ..utils import require_sndfile
@pytest.fixture()
def tar_wav_path(shared_datadir, tmp_path_factory):
audio_path = str(shared_datadir / "test_audio_44100.wav")
path = tmp_path_factory.mktemp("data") / "audio_data.wav.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
@pytest.fixture()
def tar_mp3_path(shared_datadir, tmp_path_factory):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
path = tmp_path_factory.mktemp("data") / "audio_data.mp3.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
def iter_archive(archive_path):
with tarfile.open(archive_path) as tar:
for tarinfo in tar:
file_path = tarinfo.name
file_obj = tar.extractfile(tarinfo)
yield file_path, file_obj
def test_audio_instantiation():
audio = Audio()
assert audio.sampling_rate is None
assert audio.mono is True
assert audio.id is None
assert audio.dtype == "dict"
assert audio.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()})
assert audio._type == "Audio"
def test_audio_feature_type_to_arrow():
features = Features({"audio": Audio()})
assert features.arrow_schema == pa.schema({"audio": Audio().pa_type})
features = Features({"struct_containing_an_audio": {"audio": Audio()}})
assert features.arrow_schema == pa.schema({"struct_containing_an_audio": pa.struct({"audio": Audio().pa_type})})
features = Features({"sequence_of_audios": Sequence(Audio())})
assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: audio_path,
lambda audio_path: open(audio_path, "rb").read(),
lambda audio_path: {"path": audio_path},
lambda audio_path: {"path": audio_path, "bytes": None},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"path": None, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"bytes": open(audio_path, "rb").read()},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: {"path": audio_path, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": None, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read(), "sampling_rate": 16_000},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example_pcm(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio = Audio(sampling_rate=16_000)
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
@require_sndfile
def test_audio_decode_example(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (202311,)
assert decoded_example["sampling_rate"] == 44100
with pytest.raises(RuntimeError):
Audio(decode=False).decode_example(audio_path)
@require_sndfile
def test_audio_resampling(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio(sampling_rate=16000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (73401,)
assert decoded_example["sampling_rate"] == 16000
@require_sndfile
def test_audio_decode_example_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (110592,)
assert decoded_example["sampling_rate"] == 44100
@require_sndfile
def test_audio_decode_example_opus(shared_datadir):
audio_path = str(shared_datadir / "test_audio_48000.opus")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (48000,)
assert decoded_example["sampling_rate"] == 48000
@pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio_input = {"path": audio_path, "sampling_rate": 16_000}
audio = Audio(sampling_rate=sampling_rate)
decoded_example = audio.decode_example(audio.encode_example(audio_input))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] is None
assert decoded_example["array"].shape == (16208 * sampling_rate // 16_000,)
assert decoded_example["sampling_rate"] == sampling_rate
@require_sndfile
def test_audio_resampling_mp3_different_sampling_rates(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio_path2 = str(shared_datadir / "test_audio_16000.mp3")
audio = Audio(sampling_rate=48000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (120373,)
assert decoded_example["sampling_rate"] == 48000
decoded_example = audio.decode_example(audio.encode_example(audio_path2))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path2
assert decoded_example["array"].shape == (122688,)
assert decoded_example["sampling_rate"] == 48000
@require_sndfile
def test_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_tar_wav(tar_wav_path):
audio_filename = "test_audio_44100.wav"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_wav_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_tar_mp3(tar_mp3_path):
audio_filename = "test_audio_44100.mp3"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_mp3_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (110592,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (110592,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (110592,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_with_none():
data = {"audio": [None]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] is None
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"audio"}
assert isinstance(batch["audio"], list) and all(item is None for item in batch["audio"])
column = dset["audio"]
assert len(column) == 1
assert isinstance(column, list) and all(item is None for item in column)
    # nested audio features (Sequence and struct) also accept None
data = {"audio": [[None]]}
features = Features({"audio": Sequence(Audio())})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert all(i is None for i in item["audio"])
data = {"nested": [{"audio": None}]}
features = Features({"nested": {"audio": Audio()}})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"nested"}
assert item["nested"].keys() == {"audio"}
assert item["nested"]["audio"] is None
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@pytest.mark.parametrize(
"build_data",
[
lambda audio_path: {"audio": [audio_path]},
lambda audio_path: {"audio": [open(audio_path, "rb").read()]},
lambda audio_path: {"audio": [{"path": audio_path}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": None}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"path": None, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"bytes": open(audio_path, "rb").read()}]},
],
)
def test_dataset_cast_to_audio_features(shared_datadir, build_data):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = build_data(audio_path)
dset = Dataset.from_dict(data)
item = dset.cast(Features({"audio": Audio()}))[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
item = dset.cast_column("audio", Audio())[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
def test_dataset_concatenate_audio_features(shared_datadir):
    # dset1 and dset2 store the audio differently (file path vs raw bytes) to make sure both representations can be concatenated
audio_path = str(shared_datadir / "test_audio_44100.wav")
data1 = {"audio": [audio_path]}
dset1 = Dataset.from_dict(data1, features=Features({"audio": Audio()}))
data2 = {"audio": [{"bytes": open(audio_path, "rb").read()}]}
dset2 = Dataset.from_dict(data2, features=Features({"audio": Audio()}))
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert concatenated_dataset[0]["audio"]["array"].shape == dset1[0]["audio"]["array"].shape
assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
def test_dataset_concatenate_nested_audio_features(shared_datadir):
    # dset1 and dset2 store the nested audio differently (file path vs raw bytes) to make sure both representations can be concatenated
audio_path = str(shared_datadir / "test_audio_44100.wav")
features = Features({"list_of_structs_of_audios": [{"audio": Audio()}]})
data1 = {"list_of_structs_of_audios": [[{"audio": audio_path}]]}
dset1 = Dataset.from_dict(data1, features=features)
data2 = {"list_of_structs_of_audios": [[{"audio": {"bytes": open(audio_path, "rb").read()}}]]}
dset2 = Dataset.from_dict(data2, features=features)
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert (
concatenated_dataset[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset1[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
assert (
concatenated_dataset[1]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset2[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
@require_sndfile
def test_dataset_with_audio_feature_map_is_not_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
expected_audio = features.encode_batch(data)["audio"][0]
for item in dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello"}
def process_text(example):
example["text"] = example["text"] + " World!"
return example
processed_dset = dset.map(process_text)
for item in processed_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello World!"}
@require_sndfile
def test_dataset_with_audio_feature_map_is_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
def process_audio_sampling_rate_by_example(example):
example["double_sampling_rate"] = 2 * example["audio"]["sampling_rate"]
return example
decoded_dset = dset.map(process_audio_sampling_rate_by_example)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
def process_audio_sampling_rate_by_batch(batch):
double_sampling_rates = []
for audio in batch["audio"]:
double_sampling_rates.append(2 * audio["sampling_rate"])
batch["double_sampling_rate"] = double_sampling_rates
return batch
decoded_dset = dset.map(process_audio_sampling_rate_by_batch, batched=True)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
@require_sndfile
def test_formatted_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path, audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert item["audio"][0]["path"] == audio_path
assert item["audio"][0]["array"].shape == (202311,)
assert item["audio"][0]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@pytest.fixture
def jsonl_audio_dataset_path(shared_datadir, tmp_path_factory):
import json
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = [{"audio": audio_path, "text": "Hello world!"}]
path = str(tmp_path_factory.mktemp("data") / "audio_dataset.jsonl")
with open(path, "w") as f:
for item in data:
f.write(json.dumps(item) + "\n")
return path
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_with_audio_feature(streaming, jsonl_audio_dataset_path, shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data_files = jsonl_audio_dataset_path
features = Features({"audio": Audio(), "text": Value("string")})
dset = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming)
item = dset[0] if not streaming else next(iter(dset))
assert item.keys() == {"audio", "text"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
@require_sndfile
@pytest.mark.integration
def test_dataset_with_audio_feature_loaded_from_cache():
# load first time
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean")
# load from cache
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
assert isinstance(ds, Dataset)
def test_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
def assert_audio_example_undecoded(example):
assert example["audio"] == {"path": audio_path, "bytes": None}
dset.map(assert_audio_example_undecoded)
def assert_audio_batch_undecoded(batch):
for audio in batch["audio"]:
assert audio == {"path": audio_path, "bytes": None}
dset.map(assert_audio_batch_undecoded, batched=True)
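# embed_storage reads the referenced audio file and stores its bytes in the Arrow storage,
# keeping only the file name as the path.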
def test_audio_embed_storage(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
example = {"bytes": None, "path": audio_path}
storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()}))
embedded_storage = Audio().embed_storage(storage)
embedded_example = embedded_storage.to_pylist()[0]
assert embedded_example == {"bytes": open(audio_path, "rb").read(), "path": "test_audio_44100.wav"}
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/features/test_features.py | import datetime
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from datasets import Array2D
from datasets.arrow_dataset import Dataset
from datasets.features import Audio, ClassLabel, Features, Image, Sequence, Value
from datasets.features.features import (
_arrow_to_datasets_dtype,
_cast_to_python_objects,
cast_to_python_objects,
encode_nested_example,
generate_from_dict,
string_to_arrow,
)
from datasets.features.translation import Translation, TranslationVariableLanguages
from datasets.info import DatasetInfo
from datasets.utils.py_utils import asdict
from ..utils import require_jax, require_tf, require_torch
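# Round-trip and schema tests for Features: arrow schema conversion, dtype-string parsing,
# field reordering, flattening, and keeping the decoding dict in sync.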
class FeaturesTest(TestCase):
def test_from_arrow_schema_simple(self):
data = {"a": [{"b": {"c": "text"}}] * 10, "foo": [1] * 10}
original_features = Features({"a": {"b": {"c": Value("string")}}, "foo": Value("int64")})
dset = Dataset.from_dict(data, features=original_features)
new_features = dset.features
new_dset = Dataset.from_dict(data, features=new_features)
self.assertEqual(original_features.type, new_features.type)
self.assertDictEqual(dset[0], new_dset[0])
self.assertDictEqual(dset[:], new_dset[:])
def test_from_arrow_schema_with_sequence(self):
data = {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}
original_features = Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")})
dset = Dataset.from_dict(data, features=original_features)
new_features = dset.features
new_dset = Dataset.from_dict(data, features=new_features)
self.assertEqual(original_features.type, new_features.type)
self.assertDictEqual(dset[0], new_dset[0])
self.assertDictEqual(dset[:], new_dset[:])
def test_string_to_arrow_bijection_for_primitive_types(self):
supported_pyarrow_datatypes = [
pa.time32("s"),
pa.time64("us"),
pa.timestamp("s"),
pa.timestamp("ns", tz="America/New_York"),
pa.date32(),
pa.date64(),
pa.duration("s"),
pa.decimal128(10, 2),
pa.decimal256(40, -3),
pa.string(),
pa.int32(),
pa.float64(),
pa.array([datetime.time(1, 1, 1)]).type, # arrow type: DataType(time64[us])
]
for dt in supported_pyarrow_datatypes:
self.assertEqual(dt, string_to_arrow(_arrow_to_datasets_dtype(dt)))
unsupported_pyarrow_datatypes = [pa.list_(pa.float64())]
for dt in unsupported_pyarrow_datatypes:
with self.assertRaises(ValueError):
string_to_arrow(_arrow_to_datasets_dtype(dt))
supported_datasets_dtypes = [
"time32[s]",
"timestamp[ns]",
"timestamp[ns, tz=+07:30]",
"duration[us]",
"decimal128(30, -4)",
"int32",
"float64",
]
for sdt in supported_datasets_dtypes:
self.assertEqual(sdt, _arrow_to_datasets_dtype(string_to_arrow(sdt)))
unsupported_datasets_dtypes = [
"time32[ns]",
"timestamp[blob]",
"timestamp[[ns]]",
"timestamp[ns, tz=[ns]]",
"duration[[us]]",
"decimal20(30, -4)",
"int",
]
for sdt in unsupported_datasets_dtypes:
with self.assertRaises(ValueError):
string_to_arrow(sdt)
def test_feature_named_type(self):
"""reference: issue #1110"""
features = Features({"_type": Value("string")})
ds_info = DatasetInfo(features=features)
reloaded_features = Features.from_dict(asdict(ds_info)["features"])
assert features == reloaded_features
def test_feature_named_self_as_kwarg(self):
"""reference: issue #5641"""
features = Features(self=Value("string"))
ds_info = DatasetInfo(features=features)
reloaded_features = Features.from_dict(asdict(ds_info)["features"])
assert features == reloaded_features
def test_class_label_feature_with_no_labels(self):
"""reference: issue #4681"""
features = Features({"label": ClassLabel(names=[])})
ds_info = DatasetInfo(features=features)
reloaded_features = Features.from_dict(asdict(ds_info)["features"])
assert features == reloaded_features
def test_reorder_fields_as(self):
features = Features(
{
"id": Value("string"),
"document": {
"title": Value("string"),
"url": Value("string"),
"html": Value("string"),
"tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}),
},
"question": {
"text": Value("string"),
"tokens": Sequence(Value("string")),
},
"annotations": Sequence(
{
"id": Value("string"),
"long_answer": {
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
"end_byte": Value("int64"),
},
"short_answers": Sequence(
{
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
"end_byte": Value("int64"),
"text": Value("string"),
}
),
"yes_no_answer": ClassLabel(names=["NO", "YES"]),
}
),
}
)
other = Features( # same but with [] instead of sequences, and with a shuffled fields order
{
"id": Value("string"),
"document": {
"tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}),
"title": Value("string"),
"url": Value("string"),
"html": Value("string"),
},
"question": {
"text": Value("string"),
"tokens": [Value("string")],
},
"annotations": {
"yes_no_answer": [ClassLabel(names=["NO", "YES"])],
"id": [Value("string")],
"long_answer": [
{
"end_byte": Value("int64"),
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
}
],
"short_answers": [
Sequence(
{
"text": Value("string"),
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
"end_byte": Value("int64"),
}
)
],
},
}
)
expected = Features(
{
"id": Value("string"),
"document": {
"tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}),
"title": Value("string"),
"url": Value("string"),
"html": Value("string"),
},
"question": {
"text": Value("string"),
"tokens": Sequence(Value("string")),
},
"annotations": Sequence(
{
"yes_no_answer": ClassLabel(names=["NO", "YES"]),
"id": Value("string"),
"long_answer": {
"end_byte": Value("int64"),
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
},
"short_answers": Sequence(
{
"text": Value("string"),
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
"end_byte": Value("int64"),
}
),
}
),
}
)
reordered_features = features.reorder_fields_as(other)
self.assertDictEqual(reordered_features, expected)
self.assertEqual(reordered_features.type, other.type)
self.assertEqual(reordered_features.type, expected.type)
self.assertNotEqual(reordered_features.type, features.type)
def test_flatten(self):
features = Features({"foo": {"bar1": Value("int32"), "bar2": {"foobar": Value("string")}}})
_features = features.copy()
flattened_features = features.flatten()
assert flattened_features == {"foo.bar1": Value("int32"), "foo.bar2.foobar": Value("string")}
assert features == _features, "calling flatten shouldn't alter the current features"
def test_flatten_with_sequence(self):
features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})})
_features = features.copy()
flattened_features = features.flatten()
assert flattened_features == {"foo.bar": [{"my_value": Value("int32")}]}
assert features == _features, "calling flatten shouldn't alter the current features"
def test_features_dicts_are_synced(self):
def assert_features_dicts_are_synced(features: Features):
assert (
hasattr(features, "_column_requires_decoding")
and features.keys() == features._column_requires_decoding.keys()
)
features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})})
assert_features_dicts_are_synced(features)
features["barfoo"] = Image()
assert_features_dicts_are_synced(features)
del features["barfoo"]
assert_features_dicts_are_synced(features)
features.update({"foobar": Value("string")})
assert_features_dicts_are_synced(features)
features.pop("foobar")
assert_features_dicts_are_synced(features)
features.popitem()
assert_features_dicts_are_synced(features)
features.setdefault("xyz", Value("bool"))
assert_features_dicts_are_synced(features)
features.clear()
assert_features_dicts_are_synced(features)
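# Module-level ClassLabel tests: construction, str<->int conversion, Arrow storage casting, and (de)serialization.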
def test_classlabel_init(tmp_path_factory):
names = ["negative", "positive"]
names_file = str(tmp_path_factory.mktemp("features") / "labels.txt")
with open(names_file, "w", encoding="utf-8") as f:
f.write("\n".join(names))
classlabel = ClassLabel(names=names)
assert classlabel.names == names and classlabel.num_classes == len(names)
classlabel = ClassLabel(names_file=names_file)
assert classlabel.names == names and classlabel.num_classes == len(names)
classlabel = ClassLabel(num_classes=len(names), names=names)
assert classlabel.names == names and classlabel.num_classes == len(names)
classlabel = ClassLabel(num_classes=len(names))
assert classlabel.names == [str(i) for i in range(len(names))] and classlabel.num_classes == len(names)
with pytest.raises(ValueError):
classlabel = ClassLabel(num_classes=len(names) + 1, names=names)
with pytest.raises(ValueError):
classlabel = ClassLabel(names=names, names_file=names_file)
with pytest.raises(ValueError):
classlabel = ClassLabel()
with pytest.raises(TypeError):
classlabel = ClassLabel(names=np.array(names))
def test_classlabel_str2int():
names = ["negative", "positive"]
classlabel = ClassLabel(names=names)
for label in names:
assert classlabel.str2int(label) == names.index(label)
with pytest.raises(ValueError):
classlabel.str2int("__bad_label_name__")
with pytest.raises(ValueError):
classlabel.str2int(1)
with pytest.raises(ValueError):
classlabel.str2int(None)
def test_classlabel_int2str():
names = ["negative", "positive"]
classlabel = ClassLabel(names=names)
for i in range(len(names)):
assert classlabel.int2str(i) == names[i]
with pytest.raises(ValueError):
classlabel.int2str(len(names))
with pytest.raises(ValueError):
classlabel.int2str(-1)
with pytest.raises(ValueError):
classlabel.int2str(None)
def test_classlabel_cast_storage():
names = ["negative", "positive"]
classlabel = ClassLabel(names=names)
# from integers
arr = pa.array([0, 1, -1, -100], type=pa.int64())
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == [0, 1, -1, -100]
arr = pa.array([0, 1, -1, -100], type=pa.int32())
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == [0, 1, -1, -100]
arr = pa.array([3])
with pytest.raises(ValueError):
classlabel.cast_storage(arr)
# from strings
arr = pa.array(["negative", "positive"])
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == [0, 1]
arr = pa.array(["__label_that_doesnt_exist__"])
with pytest.raises(ValueError):
classlabel.cast_storage(arr)
# from nulls
arr = pa.array([None])
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == [None]
# from empty
arr = pa.array([], pa.int64())
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == []
arr = pa.array([], pa.string())
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == []
@pytest.mark.parametrize("class_label_arg", ["names", "names_file"])
def test_class_label_to_and_from_dict(class_label_arg, tmp_path_factory):
names = ["negative", "positive"]
names_file = str(tmp_path_factory.mktemp("features") / "labels.txt")
with open(names_file, "w", encoding="utf-8") as f:
f.write("\n".join(names))
if class_label_arg == "names":
class_label = ClassLabel(names=names)
elif class_label_arg == "names_file":
class_label = ClassLabel(names_file=names_file)
generated_class_label = generate_from_dict(asdict(class_label))
assert generated_class_label == class_label
@pytest.mark.parametrize("inner_type", [Value("int32"), {"subcolumn": Value("int32")}])
def test_encode_nested_example_sequence_with_none(inner_type):
schema = Sequence(inner_type)
obj = None
result = encode_nested_example(schema, obj)
assert result is None
def test_encode_batch_with_example_with_empty_first_elem():
features = Features(
{
"x": Sequence(Sequence(ClassLabel(names=["a", "b"]))),
}
)
encoded_batch = features.encode_batch(
{
"x": [
[["a"], ["b"]],
[[], ["b"]],
]
}
)
assert encoded_batch == {"x": [[[0], [1]], [[], [1]]]}
@pytest.mark.parametrize(
"feature",
[
Value("int32"),
ClassLabel(num_classes=2),
Translation(languages=["en", "fr"]),
TranslationVariableLanguages(languages=["en", "fr"]),
],
)
def test_dataset_feature_with_none(feature):
data = {"col": [None]}
features = Features({"col": feature})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"col"}
assert item["col"] is None
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"col"}
assert isinstance(batch["col"], list) and all(item is None for item in batch["col"])
column = dset["col"]
assert len(column) == 1
assert isinstance(column, list) and all(item is None for item in column)
# nested tests
data = {"col": [[None]]}
features = Features({"col": Sequence(feature)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"col"}
assert all(i is None for i in item["col"])
data = {"nested": [{"col": None}]}
features = Features({"nested": {"col": feature}})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"nested"}
assert item["nested"].keys() == {"col"}
assert item["nested"]["col"] is None
def iternumpy(key1, value1, value2):
if value1.dtype != value2.dtype: # check only for dtype
raise AssertionError(
f"dtype of '{key1}' key for casted object: {value1.dtype} and expected object: {value2.dtype} not matching"
)
def dict_diff(d1: dict, d2: dict): # check if 2 dictionaries are equal
np.testing.assert_equal(d1, d2) # sanity check if dict values are equal or not
for (k1, v1), (k2, v2) in zip(d1.items(), d2.items()): # check if their values have same dtype or not
if isinstance(v1, dict): # nested dictionary case
dict_diff(v1, v2)
elif isinstance(v1, np.ndarray): # checks if dtype and value of np.ndarray is equal
iternumpy(k1, v1, v2)
elif isinstance(v1, list):
for element1, element2 in zip(v1, v2): # iterates over all elements of list
if isinstance(element1, dict):
dict_diff(element1, element2)
elif isinstance(element1, np.ndarray):
iternumpy(k1, element1, element2)
class CastToPythonObjectsTest(TestCase):
def test_cast_to_python_objects_list(self):
obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
casted_obj = cast_to_python_objects(obj)
self.assertDictEqual(casted_obj, expected_obj)
def test_cast_to_python_objects_tuple(self):
obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]}
expected_obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]}
casted_obj = cast_to_python_objects(obj)
self.assertDictEqual(casted_obj, expected_obj)
def test_cast_to_python_or_numpy(self):
obj = {"col_1": [{"vec": np.arange(1, 4), "txt": "foo"}] * 3, "col_2": np.arange(1, 7).reshape(3, 2)}
expected_obj = {
"col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3,
"col_2": np.array([[1, 2], [3, 4], [5, 6]]),
}
casted_obj = cast_to_python_objects(obj)
dict_diff(casted_obj, expected_obj)
def test_cast_to_python_objects_series(self):
obj = {
"col_1": pd.Series([{"vec": [1, 2, 3], "txt": "foo"}] * 3),
"col_2": pd.Series([[1, 2], [3, 4], [5, 6]]),
}
expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
casted_obj = cast_to_python_objects(obj)
self.assertDictEqual(casted_obj, expected_obj)
def test_cast_to_python_objects_dataframe(self):
obj = pd.DataFrame({"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]})
expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
casted_obj = cast_to_python_objects(obj)
self.assertDictEqual(casted_obj, expected_obj)
def test_cast_to_python_objects_pandas_timestamp(self):
obj = pd.Timestamp(2020, 1, 1)
expected_obj = obj.to_pydatetime()
casted_obj = cast_to_python_objects(obj)
self.assertEqual(casted_obj, expected_obj)
casted_obj = cast_to_python_objects(pd.Series([obj]))
self.assertListEqual(casted_obj, [expected_obj])
casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]}))
self.assertDictEqual(casted_obj, {"a": [expected_obj]})
def test_cast_to_python_objects_pandas_timedelta(self):
obj = pd.Timedelta(seconds=1)
expected_obj = obj.to_pytimedelta()
casted_obj = cast_to_python_objects(obj)
self.assertEqual(casted_obj, expected_obj)
casted_obj = cast_to_python_objects(pd.Series([obj]))
self.assertListEqual(casted_obj, [expected_obj])
casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]}))
self.assertDictEqual(casted_obj, {"a": [expected_obj]})
@require_torch
def test_cast_to_python_objects_torch(self):
import torch
obj = {
"col_1": [{"vec": torch.tensor(np.arange(1, 4)), "txt": "foo"}] * 3,
"col_2": torch.tensor(np.arange(1, 7).reshape(3, 2)),
}
expected_obj = {
"col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3,
"col_2": np.array([[1, 2], [3, 4], [5, 6]]),
}
casted_obj = cast_to_python_objects(obj)
dict_diff(casted_obj, expected_obj)
@require_tf
def test_cast_to_python_objects_tf(self):
import tensorflow as tf
obj = {
"col_1": [{"vec": tf.constant(np.arange(1, 4)), "txt": "foo"}] * 3,
"col_2": tf.constant(np.arange(1, 7).reshape(3, 2)),
}
expected_obj = {
"col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3,
"col_2": np.array([[1, 2], [3, 4], [5, 6]]),
}
casted_obj = cast_to_python_objects(obj)
dict_diff(casted_obj, expected_obj)
@require_jax
def test_cast_to_python_objects_jax(self):
import jax.numpy as jnp
obj = {
"col_1": [{"vec": jnp.array(np.arange(1, 4)), "txt": "foo"}] * 3,
"col_2": jnp.array(np.arange(1, 7).reshape(3, 2)),
}
assert obj["col_2"].dtype == jnp.int32
expected_obj = {
"col_1": [{"vec": np.array([1, 2, 3], dtype=np.int32), "txt": "foo"}] * 3,
"col_2": np.array([[1, 2], [3, 4], [5, 6]], dtype=np.int32),
}
casted_obj = cast_to_python_objects(obj)
dict_diff(casted_obj, expected_obj)
@patch("datasets.features.features._cast_to_python_objects", side_effect=_cast_to_python_objects)
def test_dont_iterate_over_each_element_in_a_list(self, mocked_cast):
obj = {"col_1": [[1, 2], [3, 4], [5, 6]]}
cast_to_python_objects(obj)
self.assertEqual(mocked_cast.call_count, 4) # 4 = depth of obj
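# Feature fixtures reused by the serialization round-trip tests below (to_dict, YAML list, Arrow schema).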
SIMPLE_FEATURES = [
Features(),
Features({"a": Value("int32")}),
Features({"a": Value("int32", id="my feature")}),
Features({"a": Value("int32"), "b": Value("float64"), "c": Value("string")}),
]
CUSTOM_FEATURES = [
Features({"label": ClassLabel(names=["negative", "positive"])}),
Features({"array": Array2D(dtype="float32", shape=(4, 4))}),
Features({"image": Image()}),
Features({"audio": Audio()}),
Features({"image": Image(decode=False)}),
Features({"audio": Audio(decode=False)}),
Features({"translation": Translation(["en", "fr"])}),
Features({"translation": TranslationVariableLanguages(["en", "fr"])}),
]
NESTED_FEATURES = [
Features({"foo": {}}),
Features({"foo": {"bar": Value("int32")}}),
Features({"foo": {"bar1": Value("int32"), "bar2": Value("float64")}}),
Features({"foo": Sequence(Value("int32"))}),
Features({"foo": Sequence({})}),
Features({"foo": Sequence({"bar": Value("int32")})}),
Features({"foo": [Value("int32")]}),
Features({"foo": [{"bar": Value("int32")}]}),
]
NESTED_CUSTOM_FEATURES = [
Features({"foo": {"bar": ClassLabel(names=["negative", "positive"])}}),
Features({"foo": Sequence(ClassLabel(names=["negative", "positive"]))}),
Features({"foo": Sequence({"bar": ClassLabel(names=["negative", "positive"])})}),
Features({"foo": [ClassLabel(names=["negative", "positive"])]}),
Features({"foo": [{"bar": ClassLabel(names=["negative", "positive"])}]}),
]
@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES)
def test_features_to_dict(features: Features):
features_dict = features.to_dict()
assert isinstance(features_dict, dict)
reloaded = Features.from_dict(features_dict)
assert features == reloaded
@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES)
def test_features_to_yaml_list(features: Features):
features_yaml_list = features._to_yaml_list()
assert isinstance(features_yaml_list, list)
reloaded = Features._from_yaml_list(features_yaml_list)
assert features == reloaded
@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES)
def test_features_to_arrow_schema(features: Features):
arrow_schema = features.arrow_schema
assert isinstance(arrow_schema, pa.Schema)
reloaded = Features.from_arrow_schema(arrow_schema)
assert features == reloaded
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/fixtures/files.py
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
# dataset + arrow_file
@pytest.fixture(scope="session")
def dataset():
n = 10
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
"answers": datasets.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
"id": datasets.Value("int64"),
}
)
dataset = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(n)),
},
features=features,
)
return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
dataset.map(cache_file_name=filename)
return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.txt"
data = FILE_CONTENT
with open(filename, "w") as f:
f.write(data)
return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
import bz2
path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
import gzip
path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
data = bytes(FILE_CONTENT, "utf-8")
with gzip.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
data = bytes(FILE_CONTENT, "utf-8")
with lz4.frame.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
path = tmp_path_factory.mktemp("data") / "file.txt.7z"
with py7zr.SevenZipFile(path, "w") as archive:
archive.write(text_file, arcname=os.path.basename(text_file))
return path
@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
path = tmp_path_factory.mktemp("data") / "file.txt.tar"
with tarfile.TarFile(path, "w") as f:
f.add(text_file, arcname=os.path.basename(text_file))
return path
@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
import lzma
path = tmp_path_factory.mktemp("data") / "file.txt.xz"
data = bytes(FILE_CONTENT, "utf-8")
with lzma.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
path = tmp_path_factory.mktemp("data") / "file.txt.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(text_file, arcname=os.path.basename(text_file))
return path
@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
path = tmp_path_factory.mktemp("data") / "file.txt.zst"
data = bytes(FILE_CONTENT, "utf-8")
with zstd.open(path, "wb") as f:
f.write(data)
return path
# xml_file
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.xml"
data = textwrap.dedent(
"""\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>"""
)
with open(filename, "w") as f:
f.write(data)
return filename
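# Small tabular payloads shared by the CSV, JSON, Parquet and SQLite path fixtures below.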
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
dataset.map(cache_file_name=path)
return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
with contextlib.closing(sqlite3.connect(path)) as con:
cur = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
con.commit()
return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
import bz2
path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
with open(csv_path, "rb") as f:
data = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.basename(csv_path))
f.write(csv2_path, arcname=os.path.basename(csv2_path))
return path
@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
return path
@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
schema = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.int64(),
"col_3": pa.float64(),
}
)
with open(path, "wb") as f:
writer = pq.ParquetWriter(f, schema=schema)
pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
writer.write_table(pa_table)
writer.close()
return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.json")
data = {"data": DATA}
with open(path, "w") as f:
json.dump(data, f)
return path
@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.json")
data = {"data": DATA_DICT_OF_LISTS}
with open(path, "w") as f:
json.dump(data, f)
return path
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
with open(path, "w") as f:
for item in DATA:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
with open(path, "w") as f:
for item in DATA:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
with open(path, "w") as f:
for item in DATA_312:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
with open(path, "w") as f:
for item in DATA_STR:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
import gzip
path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
with open(text_path, "rb") as orig_file:
with gzip.open(path, "wb") as zipped_file:
zipped_file.writelines(orig_file)
return path
@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
import gzip
path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
with open(jsonl_path, "rb") as orig_file:
with gzip.open(path, "wb") as zipped_file:
zipped_file.writelines(orig_file)
return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
return path
@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
return path
@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
return path
@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
with tarfile.TarFile(path, "w") as f:
f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
return path
@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
with tarfile.TarFile(path, "w") as f:
f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
data = ["0", "1", "2", "3"]
path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
with open(path, "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
data = ["0", "1", "2", "3"]
path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
with open(path, "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def text_dir_with_unsupported_extension(tmp_path_factory):
data = ["0", "1", "2", "3"]
path = tmp_path_factory.mktemp("data") / "dataset.abc"
with open(path, "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(text_path, arcname=os.path.basename(text_path))
f.write(text2_path, arcname=os.path.basename(text2_path))
return path
@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
return path
@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname="unsupported.ext")
        f.write(text2_path, arcname="unsupported_2.ext")
return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
with open(path, "w", encoding="utf-8") as f:
f.write(text)
return path
@pytest.fixture(scope="session")
def image_file():
return os.path.join("tests", "features", "data", "test_image_rgb.jpg")
@pytest.fixture(scope="session")
def audio_file():
return os.path.join("tests", "features", "data", "test_audio_44100.wav")
@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(image_file, arcname=os.path.basename(image_file))
f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
data_dir = tmp_path_factory.mktemp("data_dir")
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "subdir" / "test.txt", "w") as f:
f.write("bar\n" * 10)
# hidden file
with open(data_dir / "subdir" / ".test.txt", "w") as f:
f.write("bar\n" * 10)
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / ".subdir" / "test.txt", "w") as f:
f.write("bar\n" * 10)
return data_dir
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/fixtures/fsspec.py
import posixpath
from pathlib import Path
from unittest.mock import patch
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
from fsspec.registry import _registry as _fsspec_registry
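# A filesystem exposing the "mock://" protocol that forwards every operation to a LocalFileSystem
# rooted at local_root_dir and strips that root prefix from returned paths.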
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out]
else:
return [name[len(self.local_root_dir) :] for name in out]
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :]
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
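# A "tmp://" filesystem whose root directory is injected per test by patching the tmp_dir class attribute (see the tmpfs fixture below).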
class TmpDirFileSystem(MockFileSystem):
protocol = "tmp"
tmp_dir = None
def __init__(self, *args, **kwargs):
assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set"
super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("tmp://"):
path = path[6:]
return path
@pytest.fixture
def mock_fsspec():
_fsspec_registry["mock"] = MockFileSystem
_fsspec_registry["tmp"] = TmpDirFileSystem
yield
del _fsspec_registry["mock"]
del _fsspec_registry["tmp"]
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
@pytest.fixture
def tmpfs(tmp_path_factory, mock_fsspec):
tmp_fs_dir = tmp_path_factory.mktemp("tmpfs")
with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir):
yield TmpDirFileSystem()
TmpDirFileSystem.clear_instance_cache()
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/fixtures/hub.py
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
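# Dummy user, token, and endpoint URLs for the Hub CI instance (hub-ci.huggingface.co) used by the fixtures below.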
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
previous_token = HfFolder.get_token()
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id):
try:
yield repo_id
finally:
cleanup_repo(repo_id)
return _temporary_repo
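# Session-scoped fixtures that create private dataset repos on the Hub CI instance and delete them on teardown.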
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/packaged_modules/test_audiofolder.py
import shutil
import textwrap
import librosa
import numpy as np
import pytest
import soundfile as sf
from datasets import Audio, ClassLabel, Features, Value
from datasets.data_files import DataFilesDict, get_data_patterns
from datasets.download.streaming_download_manager import StreamingDownloadManager
from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder
from ..utils import require_sndfile
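# The fixtures below build AudioFolder layouts on disk: label subdirectories, metadata.jsonl/csv files,
# split directories, and zipped archives.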
@pytest.fixture
def cache_dir(tmp_path):
return str(tmp_path / "audiofolder_cache_dir")
@pytest.fixture
def data_files_with_labels_no_metadata(tmp_path, audio_file):
data_dir = tmp_path / "data_files_with_labels_no_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "fr"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "uk"
subdir_class_1.mkdir(parents=True, exist_ok=True)
audio_filename = subdir_class_0 / "audio_fr.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = subdir_class_1 / "audio_uk.wav"
shutil.copyfile(audio_file, audio_filename2)
data_files_with_labels_no_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
return data_files_with_labels_no_metadata
@pytest.fixture
def audio_files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, audio_file):
data_dir = tmp_path / "audio_files_with_labels_and_label_key_in_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "fr"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "uk"
subdir_class_1.mkdir(parents=True, exist_ok=True)
audio_filename = subdir_class_0 / "audio_fr.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = subdir_class_1 / "audio_uk.wav"
shutil.copyfile(audio_file, audio_filename2)
audio_metadata_filename = tmp_path / data_dir / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "fr/audio_fr.wav", "text": "Audio in French", "label": "Fr"}
{"file_name": "uk/audio_uk.wav", "text": "Audio in Ukrainian", "label": "Uk"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
return str(audio_filename), str(audio_filename2), str(audio_metadata_filename)
@pytest.fixture
def audio_file_with_metadata(tmp_path, audio_file):
audio_filename = tmp_path / "audio_file.wav"
shutil.copyfile(audio_file, audio_filename)
audio_metadata_filename = tmp_path / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
return str(audio_filename), str(audio_metadata_filename)
@pytest.fixture
def audio_files_with_metadata_that_misses_one_audio(tmp_path, audio_file):
audio_filename = tmp_path / "audio_file.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = tmp_path / "audio_file2.wav"
shutil.copyfile(audio_file, audio_filename2)
audio_metadata_filename = tmp_path / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
return str(audio_filename), str(audio_filename2), str(audio_metadata_filename)
@pytest.fixture
def data_files_with_one_split_and_metadata(tmp_path, audio_file):
data_dir = tmp_path / "audiofolder_data_dir_with_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir = data_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
audio_filename = data_dir / "audio_file.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = data_dir / "audio_file2.wav"
shutil.copyfile(audio_file, audio_filename2)
audio_filename3 = subdir / "audio_file3.wav" # in subdir
shutil.copyfile(audio_file, audio_filename3)
audio_metadata_filename = data_dir / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "First audio transcription"}
{"file_name": "audio_file2.wav", "text": "Second audio transcription"}
{"file_name": "subdir/audio_file3.wav", "text": "Third audio transcription (in subdir)"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
data_files_with_one_split_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_one_split_and_metadata) == 1
assert len(data_files_with_one_split_and_metadata["train"]) == 4
return data_files_with_one_split_and_metadata
@pytest.fixture(params=["jsonl", "csv"])
def data_files_with_two_splits_and_metadata(request, tmp_path, audio_file):
data_dir = tmp_path / "audiofolder_data_dir_with_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
train_dir = data_dir / "train"
train_dir.mkdir(parents=True, exist_ok=True)
test_dir = data_dir / "test"
test_dir.mkdir(parents=True, exist_ok=True)
audio_filename = train_dir / "audio_file.wav" # train audio
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = train_dir / "audio_file2.wav" # train audio
shutil.copyfile(audio_file, audio_filename2)
audio_filename3 = test_dir / "audio_file3.wav" # test audio
shutil.copyfile(audio_file, audio_filename3)
train_audio_metadata_filename = train_dir / f"metadata.{request.param}"
audio_metadata = (
textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "First train audio transcription"}
{"file_name": "audio_file2.wav", "text": "Second train audio transcription"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,text
audio_file.wav,First train audio transcription
audio_file2.wav,Second train audio transcription
"""
)
)
with open(train_audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
test_audio_metadata_filename = test_dir / f"metadata.{request.param}"
audio_metadata = (
textwrap.dedent(
"""\
{"file_name": "audio_file3.wav", "text": "Test audio transcription"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,text
audio_file3.wav,Test audio transcription
"""
)
)
with open(test_audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_two_splits_and_metadata) == 2
assert len(data_files_with_two_splits_and_metadata["train"]) == 3
assert len(data_files_with_two_splits_and_metadata["test"]) == 2
return data_files_with_two_splits_and_metadata
@pytest.fixture
def data_files_with_zip_archives(tmp_path, audio_file):
data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
data_dir.mkdir(parents=True, exist_ok=True)
archive_dir = data_dir / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
subdir = archive_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
audio_filename = archive_dir / "audio_file.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = subdir / "audio_file2.wav" # in subdir
# make sure they're two different audios
# Indeed we won't be able to compare the audio filenames, since the archive is not extracted in streaming mode
array, sampling_rate = librosa.load(str(audio_filename), sr=16000) # original sampling rate is 44100
sf.write(str(audio_filename2), array, samplerate=16000)
audio_metadata_filename = archive_dir / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "First audio transcription"}
{"file_name": "subdir/audio_file2.wav", "text": "Second audio transcription (in subdir)"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
shutil.make_archive(str(archive_dir), "zip", archive_dir)
shutil.rmtree(str(archive_dir))
data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
assert len(data_files_with_zip_archives) == 1
assert len(data_files_with_zip_archives["train"]) == 1
return data_files_with_zip_archives
# check that labels are inferred correctly from dir names
@require_sndfile
def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir):
# there are no metadata.jsonl files in this test case
audiofolder = AudioFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False)
audiofolder.download_and_prepare()
assert audiofolder.info.features == Features({"audio": Audio(), "label": ClassLabel(names=["fr", "uk"])})
dataset = list(audiofolder.as_dataset()["train"])
label_feature = audiofolder.info.features["label"]
assert dataset[0]["label"] == label_feature._str2int["fr"]
assert dataset[1]["label"] == label_feature._str2int["uk"]
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_duplicated_label_key(
audio_files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog
):
fr_audio_file, uk_audio_file, audio_metadata_file = audio_files_with_labels_and_duplicated_label_key_in_metadata
audiofolder = AudioFolder(
drop_metadata=drop_metadata,
drop_labels=drop_labels,
data_files=[fr_audio_file, uk_audio_file, audio_metadata_file],
cache_dir=cache_dir,
)
if drop_labels is False:
# infer labels from directories even if metadata files are found
audiofolder.download_and_prepare()
warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records)
assert warning_in_logs if drop_metadata is not True else not warning_in_logs
dataset = audiofolder.as_dataset()["train"]
assert audiofolder.info.features["label"] == ClassLabel(names=["fr", "uk"])
assert all(example["label"] in audiofolder.info.features["label"]._str2int.values() for example in dataset)
else:
audiofolder.download_and_prepare()
dataset = audiofolder.as_dataset()["train"]
if drop_metadata is not True:
# labels are from metadata
assert audiofolder.info.features["label"] == Value("string")
assert all(example["label"] in ["Fr", "Uk"] for example in dataset)
else:
# drop both labels and metadata
assert audiofolder.info.features == Features({"audio": Audio()})
assert all(example.keys() == {"audio"} for example in dataset)
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels):
audiofolder = AudioFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata
)
gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# removing the labels explicitly requires drop_labels=True
assert gen_kwargs["add_labels"] is not bool(drop_labels)
assert gen_kwargs["add_metadata"] is False # metadata files is not present in this case
generator = audiofolder._generate_examples(**gen_kwargs)
if not drop_labels:
assert all(
example.keys() == {"audio", "label"} and all(val is not None for val in example.values())
for _, example in generator
)
else:
assert all(
example.keys() == {"audio"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_metadata(audio_file_with_metadata, drop_metadata, drop_labels):
audio_file, audio_metadata_file = audio_file_with_metadata
audiofolder = AudioFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [audio_file, audio_metadata_file]}
)
gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True
assert gen_kwargs["add_metadata"] is not bool(drop_metadata)
# since the dataset has metadata, adding the labels explicitly requires drop_labels=False
assert gen_kwargs["add_labels"] is (drop_labels is False)
generator = audiofolder._generate_examples(**gen_kwargs)
expected_columns = {"audio"}
if gen_kwargs["add_metadata"]:
expected_columns.add("text")
if gen_kwargs["add_labels"]:
expected_columns.add("label")
result = [example for _, example in generator]
assert len(result) == 1
example = result[0]
assert example.keys() == expected_columns
for column in expected_columns:
assert example[column] is not None
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_in_wrong_location(audio_file, audio_file_with_metadata, drop_metadata):
_, audio_metadata_file = audio_file_with_metadata
audiofolder = AudioFolder(drop_metadata=drop_metadata, data_files={"train": [audio_file, audio_metadata_file]})
gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = audiofolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"audio"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_that_misses_one_audio(
audio_files_with_metadata_that_misses_one_audio, drop_metadata
):
audio_file, audio_file2, audio_metadata_file = audio_files_with_metadata_that_misses_one_audio
if not drop_metadata:
features = Features({"audio": Audio(), "text": Value("string")})
else:
features = Features({"audio": Audio()})
audiofolder = AudioFolder(
drop_metadata=drop_metadata,
features=features,
data_files={"train": [audio_file, audio_file2, audio_metadata_file]},
)
gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = audiofolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
_ = list(generator)
else:
assert all(
example.keys() == {"audio"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_single_split(streaming, cache_dir, data_files_with_one_split_and_metadata):
data_files = data_files_with_one_split_and_metadata
audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir)
audiofolder.download_and_prepare()
datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_audios = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_audios
# make sure each sample has its own audio and metadata
assert len({example["audio"]["path"] for example in dataset}) == expected_num_of_audios
assert len({example["text"] for example in dataset}) == expected_num_of_audios
assert all(example["text"] is not None for example in dataset)
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata):
data_files = data_files_with_two_splits_and_metadata
audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir)
audiofolder.download_and_prepare()
datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_audios = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_audios
# make sure each sample has its own audio and metadata
assert len({example["audio"]["path"] for example in dataset}) == expected_num_of_audios
assert len({example["text"] for example in dataset}) == expected_num_of_audios
assert all(example["text"] is not None for example in dataset)
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives):
audiofolder = AudioFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir)
audiofolder.download_and_prepare()
datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()
for split, data_files in data_files_with_zip_archives.items():
num_of_archives = len(data_files) # the metadata file is inside the archive
expected_num_of_audios = 2 * num_of_archives
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_audios
# make sure each sample has its own audio (all arrays are different) and metadata
assert (
sum(np.array_equal(dataset[0]["audio"]["array"], example["audio"]["array"]) for example in dataset[1:])
== 0
)
assert len({example["text"] for example in dataset}) == expected_num_of_audios
assert all(example["text"] is not None for example in dataset)
@require_sndfile
def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, audio_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(audio_file, data_dir / "audio_file.wav")
audio_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
audiofolder.download_and_prepare()
dataset = audiofolder.as_dataset(split="train")
    # check that there is no metadata, since the metadata file doesn't have the expected name
assert "text" not in dataset.column_names
@require_sndfile
def test_data_files_with_wrong_audio_file_name_column_in_metadata_file(cache_dir, tmp_path, audio_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(audio_file, data_dir / "audio_file.wav")
audio_metadata_filename = data_dir / "metadata.jsonl"
    audio_metadata = textwrap.dedent(  # with bad column "bad_file_name_column" instead of "file_name"
"""\
{"bad_file_name_column": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
audiofolder.download_and_prepare()
assert "`file_name` must be present" in str(exc_info.value)
@require_sndfile
def test_data_files_with_metadata_in_different_formats(cache_dir, tmp_path, audio_file):
data_dir = tmp_path / "data_dir_with_metadata_in_different_format"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(audio_file, data_dir / "audio_file.wav")
audio_metadata_filename_jsonl = data_dir / "metadata.jsonl"
audio_metadata_jsonl = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename_jsonl, "w", encoding="utf-8") as f:
f.write(audio_metadata_jsonl)
audio_metadata_filename_csv = data_dir / "metadata.csv"
audio_metadata_csv = textwrap.dedent(
"""\
file_name,text
audio_file.wav,Audio transcription
"""
)
with open(audio_metadata_filename_csv, "w", encoding="utf-8") as f:
f.write(audio_metadata_csv)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
audiofolder.download_and_prepare()
assert "metadata files with different extensions" in str(exc_info.value)
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/packaged_modules/test_folder_based_builder.py | import importlib
import shutil
import textwrap
import pytest
from datasets import ClassLabel, DownloadManager, Features, Value
from datasets.data_files import DataFilesDict, get_data_patterns
from datasets.download.streaming_download_manager import StreamingDownloadManager
from datasets.packaged_modules.folder_based_builder.folder_based_builder import (
FolderBasedBuilder,
FolderBasedBuilderConfig,
)
from datasets.tasks import TextClassification
remote_files = [
"https://huggingface.co/datasets/polinaeterna/texts/resolve/main/hallo.txt",
"https://huggingface.co/datasets/polinaeterna/texts/resolve/main/hello.txt",
"https://huggingface.co/datasets/polinaeterna/texts/resolve/main/class1/bonjour.txt",
"https://huggingface.co/datasets/polinaeterna/texts/resolve/main/class1/bonjour2.txt",
]
class DummyFolderBasedBuilder(FolderBasedBuilder):
BASE_FEATURE = dict
BASE_COLUMN_NAME = "base"
BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig
EXTENSIONS = [".txt"]
CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label")
@pytest.fixture
def cache_dir(tmp_path):
return str(tmp_path / "autofolder_cache_dir")
@pytest.fixture
def auto_text_file(text_file):
return str(text_file)
@pytest.fixture
def data_files_with_labels_no_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "data_files_with_labels_no_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "class0"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "class1"
subdir_class_1.mkdir(parents=True, exist_ok=True)
filename = subdir_class_0 / "file0.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = subdir_class_1 / "file1.txt"
shutil.copyfile(auto_text_file, filename2)
data_files_with_labels_no_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
return data_files_with_labels_no_metadata
@pytest.fixture
def data_files_with_different_levels_no_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "data_files_with_different_levels"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "class0"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "subdir" / "class1"
subdir_class_1.mkdir(parents=True, exist_ok=True)
filename = subdir_class_0 / "file0.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = subdir_class_1 / "file1.txt"
shutil.copyfile(auto_text_file, filename2)
data_files_with_different_levels = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
return data_files_with_different_levels
@pytest.fixture
def data_files_with_one_label_no_metadata(tmp_path, auto_text_file):
# only one label found = all files in a single dir/in a root dir
data_dir = tmp_path / "data_files_with_one_label"
data_dir.mkdir(parents=True, exist_ok=True)
filename = data_dir / "file0.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = data_dir / "file1.txt"
shutil.copyfile(auto_text_file, filename2)
data_files_with_one_label = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
return data_files_with_one_label
@pytest.fixture
def files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "files_with_labels_and_label_key_in_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "class0"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "class1"
subdir_class_1.mkdir(parents=True, exist_ok=True)
filename = subdir_class_0 / "file_class0.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = subdir_class_1 / "file_class1.txt"
shutil.copyfile(auto_text_file, filename2)
metadata_filename = tmp_path / data_dir / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "class0/file_class0.txt", "additional_feature": "First dummy file", "label": "CLASS_0"}
{"file_name": "class1/file_class1.txt", "additional_feature": "Second dummy file", "label": "CLASS_1"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
return str(filename), str(filename2), str(metadata_filename)
@pytest.fixture
def file_with_metadata(tmp_path, text_file):
filename = tmp_path / "file.txt"
shutil.copyfile(text_file, filename)
metadata_filename = tmp_path / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
return str(filename), str(metadata_filename)
@pytest.fixture()
def files_with_metadata_that_misses_one_sample(tmp_path, auto_text_file):
filename = tmp_path / "file.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = tmp_path / "file2.txt"
shutil.copyfile(auto_text_file, filename2)
metadata_filename = tmp_path / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
return str(filename), str(filename2), str(metadata_filename)
@pytest.fixture
def data_files_with_one_split_and_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "autofolder_data_dir_with_metadata_one_split"
data_dir.mkdir(parents=True, exist_ok=True)
subdir = data_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
filename = data_dir / "file.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = data_dir / "file2.txt"
shutil.copyfile(auto_text_file, filename2)
filename3 = subdir / "file3.txt" # in subdir
shutil.copyfile(auto_text_file, filename3)
metadata_filename = data_dir / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
{"file_name": "file2.txt", "additional_feature": "Second dummy file"}
{"file_name": "./subdir/file3.txt", "additional_feature": "Third dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
data_files_with_one_split_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_one_split_and_metadata) == 1
assert len(data_files_with_one_split_and_metadata["train"]) == 4
return data_files_with_one_split_and_metadata
@pytest.fixture
def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "autofolder_data_dir_with_metadata_two_splits"
data_dir.mkdir(parents=True, exist_ok=True)
train_dir = data_dir / "train"
train_dir.mkdir(parents=True, exist_ok=True)
test_dir = data_dir / "test"
test_dir.mkdir(parents=True, exist_ok=True)
filename = train_dir / "file.txt" # train
shutil.copyfile(auto_text_file, filename)
filename2 = train_dir / "file2.txt" # train
shutil.copyfile(auto_text_file, filename2)
filename3 = test_dir / "file3.txt" # test
shutil.copyfile(auto_text_file, filename3)
train_metadata_filename = train_dir / "metadata.jsonl"
train_metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Train dummy file"}
{"file_name": "file2.txt", "additional_feature": "Second train dummy file"}
"""
)
with open(train_metadata_filename, "w", encoding="utf-8") as f:
f.write(train_metadata)
test_metadata_filename = test_dir / "metadata.jsonl"
test_metadata = textwrap.dedent(
"""\
{"file_name": "file3.txt", "additional_feature": "Test dummy file"}
"""
)
with open(test_metadata_filename, "w", encoding="utf-8") as f:
f.write(test_metadata)
data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_two_splits_and_metadata) == 2
assert len(data_files_with_two_splits_and_metadata["train"]) == 3
assert len(data_files_with_two_splits_and_metadata["test"]) == 2
return data_files_with_two_splits_and_metadata
@pytest.fixture
def data_files_with_zip_archives(tmp_path, auto_text_file):
data_dir = tmp_path / "autofolder_data_dir_with_zip_archives"
data_dir.mkdir(parents=True, exist_ok=True)
archive_dir = data_dir / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
subdir = archive_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
filename = archive_dir / "file.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = subdir / "file2.txt" # in subdir
shutil.copyfile(auto_text_file, filename2)
metadata_filename = archive_dir / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
{"file_name": "subdir/file2.txt", "additional_feature": "Second dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
shutil.make_archive(archive_dir, "zip", archive_dir)
shutil.rmtree(str(archive_dir))
data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
assert len(data_files_with_zip_archives) == 1
assert len(data_files_with_zip_archives["train"]) == 1
return data_files_with_zip_archives
def test_inferring_labels_from_data_dirs(data_files_with_labels_no_metadata, cache_dir):
autofolder = DummyFolderBasedBuilder(
data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
assert autofolder.info.features == Features({"base": {}, "label": ClassLabel(names=["class0", "class1"])})
generator = autofolder._generate_examples(**gen_kwargs)
assert all(example["label"] in {"class0", "class1"} for _, example in generator)
def test_default_folder_builder_not_usable(data_files_with_labels_no_metadata, cache_dir):
# builder would try to access non-existing attributes of a default `BuilderConfig` class
# as a custom one is not provided
with pytest.raises(AttributeError):
_ = FolderBasedBuilder(
data_files=data_files_with_labels_no_metadata,
cache_dir=cache_dir,
)
# test that AutoFolder is extended for streaming when its child class is instantiated:
# see line 115 in src/datasets/streaming.py
def test_streaming_patched():
_ = DummyFolderBasedBuilder()
module = importlib.import_module(FolderBasedBuilder.__module__)
assert hasattr(module, "_patched_for_streaming")
assert module._patched_for_streaming
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_duplicated_label_key(
files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog
):
class0_file, class1_file, metadata_file = files_with_labels_and_duplicated_label_key_in_metadata
autofolder = DummyFolderBasedBuilder(
data_files=[class0_file, class1_file, metadata_file],
cache_dir=cache_dir,
drop_metadata=drop_metadata,
drop_labels=drop_labels,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
if drop_labels is False:
# infer labels from directories even if metadata files are found
warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records)
assert warning_in_logs if drop_metadata is not True else not warning_in_logs
assert autofolder.info.features["label"] == ClassLabel(names=["class0", "class1"])
assert all(example["label"] in ["class0", "class1"] for _, example in generator)
else:
if drop_metadata is not True:
# labels are from metadata
assert autofolder.info.features["label"] == Value("string")
assert all(example["label"] in ["CLASS_0", "CLASS_1"] for _, example in generator)
else:
# drop both labels and metadata
assert autofolder.info.features == Features({"base": {}})
assert all(example.keys() == {"base"} for _, example in generator)
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_labels(
data_files_with_labels_no_metadata, auto_text_file, drop_metadata, drop_labels, cache_dir
):
autofolder = DummyFolderBasedBuilder(
data_files=data_files_with_labels_no_metadata,
drop_metadata=drop_metadata,
drop_labels=drop_labels,
cache_dir=cache_dir,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# removing labels explicitly requires drop_labels=True
assert gen_kwargs["add_labels"] is not bool(drop_labels)
assert gen_kwargs["add_metadata"] is False
generator = autofolder._generate_examples(**gen_kwargs)
if not drop_labels:
assert all(
example.keys() == {"base", "label"} and all(val is not None for val in example.values())
for _, example in generator
)
else:
assert all(
example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator
)
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_metadata(file_with_metadata, drop_metadata, drop_labels, cache_dir):
file, metadata_file = file_with_metadata
autofolder = DummyFolderBasedBuilder(
data_files=[file, metadata_file],
drop_metadata=drop_metadata,
drop_labels=drop_labels,
cache_dir=cache_dir,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True
assert gen_kwargs["add_metadata"] is not bool(drop_metadata)
# since the dataset has metadata, adding the labels explicitly requires drop_labels=False
assert gen_kwargs["add_labels"] is (drop_labels is False)
generator = autofolder._generate_examples(**gen_kwargs)
expected_columns = {"base"}
if gen_kwargs["add_metadata"]:
expected_columns.add("additional_feature")
if gen_kwargs["add_labels"]:
expected_columns.add("label")
result = [example for _, example in generator]
assert len(result) == 1
example = result[0]
assert example.keys() == expected_columns
for column in expected_columns:
assert example[column] is not None
@pytest.mark.parametrize("remote", [True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_data_files_with_different_levels_no_metadata(
data_files_with_different_levels_no_metadata, drop_labels, remote, cache_dir
):
data_files = remote_files if remote else data_files_with_different_levels_no_metadata
autofolder = DummyFolderBasedBuilder(
data_files=data_files,
cache_dir=cache_dir,
drop_labels=drop_labels,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
if drop_labels is not False:
# with None (default) we should drop labels if files are on different levels in dir structure
assert "label" not in autofolder.info.features
assert all(example.keys() == {"base"} for _, example in generator)
else:
assert "label" in autofolder.info.features
assert isinstance(autofolder.info.features["label"], ClassLabel)
assert all(example.keys() == {"base", "label"} for _, example in generator)
@pytest.mark.parametrize("remote", [False, True])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_data_files_with_one_label_no_metadata(data_files_with_one_label_no_metadata, drop_labels, remote, cache_dir):
data_files = remote_files[:2] if remote else data_files_with_one_label_no_metadata
autofolder = DummyFolderBasedBuilder(
data_files=data_files,
cache_dir=cache_dir,
drop_labels=drop_labels,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
if drop_labels is not False:
# with None (default) we should drop labels if only one label is found (=if there is a single dir)
assert "label" not in autofolder.info.features
assert all(example.keys() == {"base"} for _, example in generator)
else:
assert "label" in autofolder.info.features
assert isinstance(autofolder.info.features["label"], ClassLabel)
assert all(example.keys() == {"base", "label"} for _, example in generator)
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_data_files_with_metadata_that_misses_one_sample(
files_with_metadata_that_misses_one_sample, drop_metadata, cache_dir
):
file, file2, metadata_file = files_with_metadata_that_misses_one_sample
if not drop_metadata:
features = Features({"base": None, "additional_feature": Value("string")})
else:
features = Features({"base": None})
autofolder = DummyFolderBasedBuilder(
data_files=[file, file2, metadata_file],
drop_metadata=drop_metadata,
features=features,
cache_dir=cache_dir,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator
)
@pytest.mark.parametrize("streaming", [False, True])
@pytest.mark.parametrize("n_splits", [1, 2])
def test_data_files_with_metadata_and_splits(
streaming, cache_dir, n_splits, data_files_with_one_split_and_metadata, data_files_with_two_splits_and_metadata
):
data_files = data_files_with_one_split_and_metadata if n_splits == 1 else data_files_with_two_splits_and_metadata
autofolder = DummyFolderBasedBuilder(
data_files=data_files,
cache_dir=cache_dir,
)
download_manager = StreamingDownloadManager() if streaming else DownloadManager()
generated_splits = autofolder._split_generators(download_manager)
for (split, files), generated_split in zip(data_files.items(), generated_splits):
assert split == generated_split.name
expected_num_of_examples = len(files) - 1
generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs))
assert len(generated_examples) == expected_num_of_examples
assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples
assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples
assert all(example["additional_feature"] is not None for _, example in generated_examples)
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives):
autofolder = DummyFolderBasedBuilder(data_files=data_files_with_zip_archives, cache_dir=cache_dir)
download_manager = StreamingDownloadManager() if streaming else DownloadManager()
generated_splits = autofolder._split_generators(download_manager)
for (split, files), generated_split in zip(data_files_with_zip_archives.items(), generated_splits):
assert split == generated_split.name
num_of_archives = len(files)
expected_num_of_examples = 2 * num_of_archives
generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs))
assert len(generated_examples) == expected_num_of_examples
assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples
assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples
assert all(example["additional_feature"] is not None for _, example in generated_examples)
def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, auto_text_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(auto_text_file, data_dir / "file.txt")
metadata_filename = data_dir / "bad_metadata.jsonl" # bad file
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
assert all("additional_feature" not in example for _, example in generator)
def test_data_files_with_wrong_file_name_column_in_metadata_file(cache_dir, tmp_path, auto_text_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(auto_text_file, data_dir / "file.txt")
metadata_filename = data_dir / "metadata.jsonl"
metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name"
"""\
{"bad_file_name": "file.txt", "additional_feature": "Dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
_ = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
assert "`file_name` must be present" in str(exc_info.value)
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/packaged_modules/test_json.py | import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Value
from datasets.packaged_modules.json.json import Json
@pytest.fixture
def jsonl_file(tmp_path):
filename = tmp_path / "file.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def jsonl_file_utf16_encoded(tmp_path):
filename = tmp_path / "file_utf16_encoded.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w", encoding="utf-16") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts(tmp_path):
filename = tmp_path / "file_with_list_of_dicts.json"
data = textwrap.dedent(
"""\
[
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("jsonl_file", {}),
("jsonl_file_utf16_encoded", {"encoding": "utf-16"}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]}
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
(
"jsonl_file",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts_field",
{
"field": "field3",
"features": Features(
{"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")}
),
},
),
],
)
def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20], "missing_col": [None, None, None]}
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/packaged_modules/test_spark.py | from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
expected_row_ids_and_row_dicts = []
for part_id in partition_order:
partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
for row_idx, row in enumerate(partition):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(100).repartition(1)
spark_builder = Spark(df)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16)
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(10).repartition(2)
partition_order = [1, 0]
generate_fn = _generate_iterable_examples(df, partition_order) # Reverse the partitions.
expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
for i, (row_id, row_dict) in enumerate(generate_fn()):
expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(10).repartition(1)
it = SparkExamplesIterable(df)
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(it):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(30).repartition(3)
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator") as generator_mock:
generator_mock.shuffle.side_effect = lambda x: x.reverse()
expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(shuffled_it):
expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(20).repartition(4)
# Partitions 0 and 2
shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
assert shard_it_1.n_shards == 2
expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
for i, (row_id, row_dict) in enumerate(shard_it_1):
expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
assert shard_it_2.n_shards == 2
expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
for i, (row_id, row_dict) in enumerate(shard_it_2):
expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(100).repartition(1)
spark_builder = Spark(df)
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1)
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/packaged_modules/test_text.py | import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Image
from datasets.packaged_modules.text.text import Text
from ..utils import require_pil
@pytest.fixture
def text_file(tmp_path):
filename = tmp_path / "text.txt"
data = textwrap.dedent(
"""\
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Second paragraph:
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
"""
)
with open(filename, "w", encoding="utf-8") as f:
f.write(data)
return str(filename)
@pytest.fixture
def text_file_with_image(tmp_path, image_file):
filename = tmp_path / "text_with_image.txt"
with open(filename, "w", encoding="utf-8") as f:
f.write(image_file)
return str(filename)
@pytest.mark.parametrize("keep_linebreaks", [True, False])
def test_text_linebreaks(text_file, keep_linebreaks):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read().splitlines(keepends=keep_linebreaks)
text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8")
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
@require_pil
def test_text_cast_image(text_file_with_image):
with open(text_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[0]
text = Text(encoding="utf-8", features=Features({"image": Image()}))
generator = text._generate_tables([[text_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
@pytest.mark.parametrize("sample_by", ["line", "paragraph", "document"])
def test_text_sample_by(sample_by, text_file):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read()
if sample_by == "line":
expected_content = expected_content.splitlines()
elif sample_by == "paragraph":
expected_content = expected_content.split("\n\n")
elif sample_by == "document":
expected_content = [expected_content]
text = Text(sample_by=sample_by, encoding="utf-8", chunksize=100)
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/packaged_modules/test_csv.py | import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
filename = tmp_path / "file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
filename = tmp_path / "malformed_file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
filename = tmp_path / "csv_with_image.csv"
data = textwrap.dedent(
f"""\
image
{image_file}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
filename = tmp_path / "csv_with_label.csv"
data = textwrap.dedent(
"""\
label
good
bad
good
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
filename = tmp_path / "csv_with_int_list.csv"
data = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
csv = Csv()
generator = csv._generate_tables([[csv_file, malformed_csv_file]])
with pytest.raises(ValueError, match="Error tokenizing data"):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(malformed_csv_file) in record.message
for record in caplog.records
)
@require_pil
def test_csv_cast_image(csv_file_with_image):
with open(csv_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[1]
csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
generator = csv._generate_tables([[csv_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
with open(csv_file_with_label, encoding="utf-8") as f:
labels = f.read().splitlines()[1:]
csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
generator = csv._generate_tables([[csv_file_with_label]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
generated_content = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
generator = csv._generate_tables([[csv_file_with_int_list]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa.types.is_list(pa_table.schema.field("int_list").type)
generated_content = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 0 |
hf_public_repos/datasets/tests | hf_public_repos/datasets/tests/packaged_modules/test_imagefolder.py | import shutil
import textwrap
import numpy as np
import pytest
from datasets import ClassLabel, Features, Image, Value
from datasets.data_files import DataFilesDict, get_data_patterns
from datasets.download.streaming_download_manager import StreamingDownloadManager
from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder
from ..utils import require_pil
@pytest.fixture
def cache_dir(tmp_path):
return str(tmp_path / "imagefolder_cache_dir")
@pytest.fixture
def data_files_with_labels_no_metadata(tmp_path, image_file):
data_dir = tmp_path / "data_files_with_labels_no_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "cat"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "dog"
subdir_class_1.mkdir(parents=True, exist_ok=True)
image_filename = subdir_class_0 / "image_cat.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = subdir_class_1 / "image_dog.jpg"
shutil.copyfile(image_file, image_filename2)
data_files_with_labels_no_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
return data_files_with_labels_no_metadata
@pytest.fixture
def image_files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, image_file):
data_dir = tmp_path / "image_files_with_labels_and_label_key_in_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "cat"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "dog"
subdir_class_1.mkdir(parents=True, exist_ok=True)
image_filename = subdir_class_0 / "image_cat.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = subdir_class_1 / "image_dog.jpg"
shutil.copyfile(image_file, image_filename2)
image_metadata_filename = tmp_path / data_dir / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "cat/image_cat.jpg", "caption": "Nice image of a cat", "label": "Cat"}
{"file_name": "dog/image_dog.jpg", "caption": "Nice image of a dog", "label": "Dog"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
return str(image_filename), str(image_filename2), str(image_metadata_filename)
@pytest.fixture
def image_file_with_metadata(tmp_path, image_file):
image_filename = tmp_path / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_metadata_filename = tmp_path / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
return str(image_filename), str(image_metadata_filename)
@pytest.fixture
def image_files_with_metadata_that_misses_one_image(tmp_path, image_file):
image_filename = tmp_path / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = tmp_path / "image_rgb2.jpg"
shutil.copyfile(image_file, image_filename2)
image_metadata_filename = tmp_path / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
return str(image_filename), str(image_filename2), str(image_metadata_filename)
@pytest.fixture(params=["jsonl", "csv"])
def data_files_with_one_split_and_metadata(request, tmp_path, image_file):
data_dir = tmp_path / "imagefolder_data_dir_with_metadata_one_split"
data_dir.mkdir(parents=True, exist_ok=True)
subdir = data_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
image_filename = data_dir / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = data_dir / "image_rgb2.jpg"
shutil.copyfile(image_file, image_filename2)
image_filename3 = subdir / "image_rgb3.jpg" # in subdir
shutil.copyfile(image_file, image_filename3)
image_metadata_filename = data_dir / f"metadata.{request.param}"
image_metadata = (
textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
{"file_name": "image_rgb2.jpg", "caption": "Nice second image"}
{"file_name": "subdir/image_rgb3.jpg", "caption": "Nice third image"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,caption
image_rgb.jpg,Nice image
image_rgb2.jpg,Nice second image
subdir/image_rgb3.jpg,Nice third image
"""
)
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_one_split_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_one_split_and_metadata) == 1
assert len(data_files_with_one_split_and_metadata["train"]) == 4
return data_files_with_one_split_and_metadata
@pytest.fixture(params=["jsonl", "csv"])
def data_files_with_two_splits_and_metadata(request, tmp_path, image_file):
data_dir = tmp_path / "imagefolder_data_dir_with_metadata_two_splits"
data_dir.mkdir(parents=True, exist_ok=True)
train_dir = data_dir / "train"
train_dir.mkdir(parents=True, exist_ok=True)
test_dir = data_dir / "test"
test_dir.mkdir(parents=True, exist_ok=True)
image_filename = train_dir / "image_rgb.jpg" # train image
shutil.copyfile(image_file, image_filename)
image_filename2 = train_dir / "image_rgb2.jpg" # train image
shutil.copyfile(image_file, image_filename2)
image_filename3 = test_dir / "image_rgb3.jpg" # test image
shutil.copyfile(image_file, image_filename3)
train_image_metadata_filename = train_dir / f"metadata.{request.param}"
image_metadata = (
textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice train image"}
{"file_name": "image_rgb2.jpg", "caption": "Nice second train image"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,caption
image_rgb.jpg,Nice train image
image_rgb2.jpg,Nice second train image
"""
)
)
with open(train_image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
test_image_metadata_filename = test_dir / f"metadata.{request.param}"
image_metadata = (
textwrap.dedent(
"""\
{"file_name": "image_rgb3.jpg", "caption": "Nice test image"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,caption
image_rgb3.jpg,Nice test image
"""
)
)
with open(test_image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_two_splits_and_metadata) == 2
assert len(data_files_with_two_splits_and_metadata["train"]) == 3
assert len(data_files_with_two_splits_and_metadata["test"]) == 2
return data_files_with_two_splits_and_metadata
@pytest.fixture
def data_files_with_zip_archives(tmp_path, image_file):
from PIL import Image, ImageOps
data_dir = tmp_path / "imagefolder_data_dir_with_zip_archives"
data_dir.mkdir(parents=True, exist_ok=True)
archive_dir = data_dir / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
subdir = archive_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
image_filename = archive_dir / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = subdir / "image_rgb2.jpg" # in subdir
# make sure they're two different images
# Indeed we won't be able to compare the image.filename, since the archive is not extracted in streaming mode
ImageOps.flip(Image.open(image_file)).save(image_filename2)
image_metadata_filename = archive_dir / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
{"file_name": "subdir/image_rgb2.jpg", "caption": "Nice second image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
shutil.make_archive(archive_dir, "zip", archive_dir)
shutil.rmtree(str(archive_dir))
data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
assert len(data_files_with_zip_archives) == 1
assert len(data_files_with_zip_archives["train"]) == 1
return data_files_with_zip_archives
@require_pil
# check that labels are inferred correctly from dir names
def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir):
# there are no metadata.jsonl files in this test case
imagefolder = ImageFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False)
imagefolder.download_and_prepare()
assert imagefolder.info.features == Features({"image": Image(), "label": ClassLabel(names=["cat", "dog"])})
dataset = list(imagefolder.as_dataset()["train"])
label_feature = imagefolder.info.features["label"]
assert dataset[0]["label"] == label_feature._str2int["cat"]
assert dataset[1]["label"] == label_feature._str2int["dog"]
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_duplicated_label_key(
image_files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog
):
cat_image_file, dog_image_file, image_metadata_file = image_files_with_labels_and_duplicated_label_key_in_metadata
imagefolder = ImageFolder(
drop_metadata=drop_metadata,
drop_labels=drop_labels,
data_files=[cat_image_file, dog_image_file, image_metadata_file],
cache_dir=cache_dir,
)
if drop_labels is False:
# infer labels from directories even if metadata files are found
imagefolder.download_and_prepare()
warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records)
assert warning_in_logs if drop_metadata is not True else not warning_in_logs
dataset = imagefolder.as_dataset()["train"]
assert imagefolder.info.features["label"] == ClassLabel(names=["cat", "dog"])
assert all(example["label"] in imagefolder.info.features["label"]._str2int.values() for example in dataset)
else:
imagefolder.download_and_prepare()
dataset = imagefolder.as_dataset()["train"]
if drop_metadata is not True:
# labels are from metadata
assert imagefolder.info.features["label"] == Value("string")
assert all(example["label"] in ["Cat", "Dog"] for example in dataset)
else:
# drop both labels and metadata
assert imagefolder.info.features == Features({"image": Image()})
assert all(example.keys() == {"image"} for example in dataset)
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels):
imagefolder = ImageFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata
)
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# removing the labels explicitly requires drop_labels=True
assert gen_kwargs["add_labels"] is not bool(drop_labels)
assert gen_kwargs["add_metadata"] is False
generator = imagefolder._generate_examples(**gen_kwargs)
if not drop_labels:
assert all(
example.keys() == {"image", "label"} and all(val is not None for val in example.values())
for _, example in generator
)
else:
assert all(
example.keys() == {"image"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_metadata(image_file_with_metadata, drop_metadata, drop_labels):
image_file, image_metadata_file = image_file_with_metadata
imagefolder = ImageFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [image_file, image_metadata_file]}
)
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True
assert gen_kwargs["add_metadata"] is not bool(drop_metadata)
# since the dataset has metadata, adding the labels explicitly requires drop_labels=False
assert gen_kwargs["add_labels"] is (drop_labels is False)
generator = imagefolder._generate_examples(**gen_kwargs)
expected_columns = {"image"}
if gen_kwargs["add_metadata"]:
expected_columns.add("caption")
if gen_kwargs["add_labels"]:
expected_columns.add("label")
result = [example for _, example in generator]
assert len(result) == 1
example = result[0]
assert example.keys() == expected_columns
for column in expected_columns:
assert example[column] is not None
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_in_wrong_location(image_file, image_file_with_metadata, drop_metadata):
_, image_metadata_file = image_file_with_metadata
imagefolder = ImageFolder(drop_metadata=drop_metadata, data_files={"train": [image_file, image_metadata_file]})
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = imagefolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"image"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_that_misses_one_image(
image_files_with_metadata_that_misses_one_image, drop_metadata
):
image_file, image_file2, image_metadata_file = image_files_with_metadata_that_misses_one_image
if not drop_metadata:
features = Features({"image": Image(), "caption": Value("string")})
else:
features = Features({"image": Image()})
imagefolder = ImageFolder(
drop_metadata=drop_metadata,
features=features,
data_files={"train": [image_file, image_file2, image_metadata_file]},
)
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = imagefolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"image"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_single_split(streaming, cache_dir, data_files_with_one_split_and_metadata):
data_files = data_files_with_one_split_and_metadata
imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir)
imagefolder.download_and_prepare()
datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_images = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_images
# make sure each sample has its own image and metadata
assert len({example["image"].filename for example in dataset}) == expected_num_of_images
assert len({example["caption"] for example in dataset}) == expected_num_of_images
assert all(example["caption"] is not None for example in dataset)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata):
data_files = data_files_with_two_splits_and_metadata
imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir)
imagefolder.download_and_prepare()
datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_images = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_images
# make sure each sample has its own image and metadata
assert len({example["image"].filename for example in dataset}) == expected_num_of_images
assert len({example["caption"] for example in dataset}) == expected_num_of_images
assert all(example["caption"] is not None for example in dataset)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives):
imagefolder = ImageFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir)
imagefolder.download_and_prepare()
datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset()
for split, data_files in data_files_with_zip_archives.items():
num_of_archives = len(data_files) # the metadata file is inside the archive
expected_num_of_images = 2 * num_of_archives
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_images
# make sure each sample has its own image and metadata
assert len({np.array(example["image"])[0, 0, 0] for example in dataset}) == expected_num_of_images
assert len({example["caption"] for example in dataset}) == expected_num_of_images
assert all(example["caption"] is not None for example in dataset)
@require_pil
def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, image_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_file, data_dir / "image_rgb.jpg")
image_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
imagefolder.download_and_prepare()
dataset = imagefolder.as_dataset(split="train")
    # check that there is no metadata, since the metadata file doesn't have the expected name
assert "caption" not in dataset.column_names
@require_pil
def test_data_files_with_wrong_image_file_name_column_in_metadata_file(cache_dir, tmp_path, image_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_file, data_dir / "image_rgb.jpg")
image_metadata_filename = data_dir / "metadata.jsonl"
image_metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name"
"""\
{"bad_file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
imagefolder.download_and_prepare()
assert "`file_name` must be present" in str(exc_info.value)
@require_pil
def test_data_files_with_metadata_in_different_formats(cache_dir, tmp_path, image_file):
data_dir = tmp_path / "data_dir_with_metadata_in_different_format"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_file, data_dir / "image_rgb.jpg")
image_metadata_filename_jsonl = data_dir / "metadata.jsonl"
image_metadata_jsonl = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename_jsonl, "w", encoding="utf-8") as f:
f.write(image_metadata_jsonl)
image_metadata_filename_csv = data_dir / "metadata.csv"
image_metadata_csv = textwrap.dedent(
"""\
file_name,caption
image_rgb.jpg,Nice image
"""
)
with open(image_metadata_filename_csv, "w", encoding="utf-8") as f:
f.write(image_metadata_csv)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
imagefolder.download_and_prepare()
assert "metadata files with different extensions" in str(exc_info.value)
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/docs/README.md | <!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Generating the documentation
To generate the documentation, you first have to build it. Several packages are necessary to build the doc;
you can install them with the following command, run at the root of the code repository:
```bash
pip install -e ".[docs]"
```
Then you need to install our special tool that builds the documentation:
```bash
pip install git+https://github.com/huggingface/doc-builder
```
---
**NOTE**
You only need to generate the documentation to inspect it locally (for instance, if you're planning changes and want to
check what they will look like before committing). You don't have to commit the built documentation.
---
## Building the documentation
Once you have set up the `doc-builder` and additional packages, you can generate the documentation by typing the
following command:
```bash
doc-builder build datasets docs/source/ --build_dir ~/tmp/test-build
```
You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
Markdown editor.
---
**NOTE**
For now, it's not possible to preview the final rendered documentation locally. Once you have opened a PR, a bot will
add a comment with a link to a preview of the documentation that includes your changes.
---
## Adding a new element to the navigation bar
Accepted files are Markdown (.md or .mdx).
Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/transformers/blob/master/docs/source/_toctree.yml) file.
## Renaming section headers and moving sections
It helps to keep the old links working when renaming section headers and/or moving sections from one document to another. This is because the old links are likely to be used in issues, forums, and social media, and it makes for a much better user experience if users reading those months later can still easily navigate to the originally intended information.
Therefore we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
```
Sections that were moved:
[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```
and of course if you moved it to another file, then:
```
Sections that were moved:
[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```
Use the relative style to link to the new file so that the versioned docs continue to work.
For an example of a rich moved sections set please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/master/docs/source/main_classes/trainer.mdx).
## Writing Documentation - Specification
The `huggingface/datasets` documentation follows the
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
although we can write them directly in Markdown.
### Adding a new tutorial
Adding a new tutorial or section is done in two steps:
- Add a new file under `./source`. This file should be Markdown (.md or .mdx).
- Link that file in `./source/_toctree.yml` on the correct toc-tree.
Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
depending on the intended targets (beginners, more advanced users or researchers) it should go in section two, three or
four.
### Adding a new model
When adding a new model:
- Create a file `xxx.mdx` under `./source/model_doc` (don't hesitate to copy an existing file as a template).
- Link that file in `./source/_toctree.yml`.
- Write a short overview of the model:
- Overview with paper & authors
- Paper abstract
- Tips and tricks and how to use it best
- Add the classes that should be linked in the model. This generally includes the configuration, the tokenizer, and
every model of that class (the base model, alongside models with additional heads), both in PyTorch and TensorFlow.
The order is generally:
- Configuration,
- Tokenizer
- PyTorch base model
- PyTorch head models
- TensorFlow base model
- TensorFlow head models
- Flax base model
- Flax head models
These classes should be added using our Markdown syntax. Usually as follows:
```
## XXXConfig
[[autodoc]] XXXConfig
```
This will include every public method of the configuration that is documented. If for some reason you wish for a method
not to be displayed in the documentation, you can do so by specifying which methods should be in the docs:
```
## XXXTokenizer
[[autodoc]] XXXTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
```
If you just want to add a method that is not documented (for instance, magic methods like `__call__` are not documented
by default), you can put the list of methods to add in a list that contains `all`:
```
## XXXTokenizer
[[autodoc]] XXXTokenizer
- all
- __call__
```
### Writing source documentation
Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names
and objects like True, None or any strings should usually be put in `code`.
When mentioning a class, function or method, it is recommended to use our syntax for internal links so that our tool
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
function to be in the main package.
If you want to create a link to some internal class or function, you need to
provide its path. For instance: \[\`file_utils.ModelOutput\`\]. This will be converted into a link with
`file_utils.ModelOutput` in the description. To get rid of the path and only keep the name of the object you are
linking to in the description, add a ~: \[\`~file_utils.ModelOutput\`\] will generate a link with `ModelOutput` in the description.
The same works for methods, so you can use either \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\].
#### Defining arguments in a method
Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon and its
description:
```
Args:
n_layers (`int`): The number of layers of the model.
```
If the description is too long to fit in one line, another indentation is necessary before writing the description
after the argument.
Here's an example showcasing everything so far:
```
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and
[`~PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
```
For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the
following signature:
```
def my_function(x: str = None, a: float = 1):
```
then its documentation should look like this:
```
Args:
x (`str`, *optional*):
This argument controls ...
a (`float`, *optional*, defaults to 1):
This argument is used to ...
```
Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
however write as many lines as you want in the indented description (see the example above with `input_ids`).
#### Writing a multi-line code block
Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
````
```
# first line of code
# second line
# etc
```
````
We follow the [doctest](https://docs.python.org/3/library/doctest.html) syntax for the examples so that we can
automatically test that the results stay consistent with the library.
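For example, a doctest-formatted snippet interleaves each statement with its expected output, so the test runner can check that the output still matches. The snippet below is an illustrative example written for this guide, not taken from the existing docs:
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_dict({"text": ["foo", "bar"]})
>>> ds.num_rows
2
```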
#### Writing a return block
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
building the return.
Here's an example for a single value return:
```
Returns:
`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```
Here's an example for tuple return, comprising several objects:
```
Returns:
`tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
- **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
- **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```
#### Adding an image
Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
If you are making an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
to this dataset.
## Styling the docstring
We have an automatic script, run with the `make style` command, that will make sure that:
- the docstrings fully take advantage of the line width
- all code examples are formatted using black, like the code of the Transformers library
This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
recommended to commit your changes before running `make style`, so you can revert the changes done by that script
easily.
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/quickstart.mdx | <!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Quickstart
[[open-in-colab]]
This quickstart is intended for developers who are ready to dive into the code and see an example of how to integrate π€ Datasets into their model training workflow. If you're a beginner, we recommend starting with our [tutorials](./tutorial), where you'll get a more thorough introduction.
Each dataset is unique, and depending on the task, some datasets may require additional steps to prepare it for training. But you can always use π€ Datasets tools to load and process a dataset. The fastest and easiest way to get started is by loading an existing dataset from the [Hugging Face Hub](https://huggingface.co/datasets). There are thousands of datasets to choose from, spanning many tasks. Choose the type of dataset you want to work with, and let's get started!
<div class="mt-4">
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-3 md:gap-y-4 md:gap-x-5">
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="#audio"
><div class="w-full text-center bg-gradient-to-r from-violet-300 via-sky-400 to-green-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Audio</div>
<p class="text-gray-700">Resample an audio dataset and get it ready for a model to classify what type of banking issue a speaker is calling about.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="#vision"
><div class="w-full text-center bg-gradient-to-r from-pink-400 via-purple-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Vision</div>
<p class="text-gray-700">Apply data augmentation to an image dataset and get it ready for a model to diagnose disease in bean plants.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="#nlp"
><div class="w-full text-center bg-gradient-to-r from-orange-300 via-red-400 to-violet-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">NLP</div>
<p class="text-gray-700">Tokenize a dataset and get it ready for a model to determine whether a pair of sentences have the same meaning.</p>
</a>
</div>
</div>
<Tip>
Check out [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course to learn more about other important topics such as loading remote or local datasets, tools for cleaning up a dataset, and creating your own dataset.
</Tip>
Start by installing π€ Datasets:
```bash
pip install datasets
```
π€ Datasets also supports audio and image data formats:
* To work with audio datasets, install the [`Audio`] feature:
```bash
pip install datasets[audio]
```
* To work with image datasets, install the [`Image`] feature:
```bash
pip install datasets[vision]
```
Besides π€ Datasets, make sure your preferred machine learning framework is installed:
<frameworkcontent>
<pt>
```bash
pip install torch
```
</pt>
<tf>
```bash
pip install tensorflow
```
</tf>
</frameworkcontent>
## Audio
Audio datasets are loaded just like text datasets. However, an audio dataset is preprocessed a bit differently. Instead of a tokenizer, you'll need a [feature extractor](https://huggingface.co/docs/transformers/main_classes/feature_extractor#feature-extractor). An audio input may also require resampling its sampling rate to match the sampling rate of the pretrained model you're using. In this quickstart, you'll prepare the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset for a model to train on and classify the banking issue a customer is having.
**1**. Load the MInDS-14 dataset by providing the [`load_dataset`] function with the dataset name, dataset configuration (not all datasets will have a configuration), and a dataset split:
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
```
**2**. Next, load a pretrained [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model and its corresponding feature extractor from the [π€ Transformers](https://huggingface.co/transformers/) library. It is totally normal to see a warning after you load the model about some weights not being initialized. This is expected because you are loading this model checkpoint for training with another task.
```py
>>> from transformers import AutoModelForAudioClassification, AutoFeatureExtractor
>>> model = AutoModelForAudioClassification.from_pretrained("facebook/wav2vec2-base")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
```
**3**. The [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset card indicates the sampling rate is 8kHz, but the Wav2Vec2 model was pretrained on a sampling rate of 16kHz. You'll need to upsample the `audio` column with the [`~Dataset.cast_column`] function and [`Audio`] feature to match the model's sampling rate.
```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
>>> dataset[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
**4**. Create a function to preprocess the audio `array` with the feature extractor, and truncate and pad the sequences into tidy rectangular tensors. The most important thing to remember is to call the audio `array` in the feature extractor since the `array` - the actual speech signal - is the model input.
Once you have a preprocessing function, use the [`~Dataset.map`] function to speed up processing by applying the function to batches of examples in the dataset.
```py
>>> def preprocess_function(examples):
... audio_arrays = [x["array"] for x in examples["audio"]]
... inputs = feature_extractor(
... audio_arrays,
... sampling_rate=16000,
... padding=True,
... max_length=100000,
... truncation=True,
... )
... return inputs
>>> dataset = dataset.map(preprocess_function, batched=True)
```
**5**. Use the [`~Dataset.rename_column`] function to rename the `intent_class` column to `labels`, which is the expected input name in [Wav2Vec2ForSequenceClassification](https://huggingface.co/docs/transformers/main/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification):
```py
>>> dataset = dataset.rename_column("intent_class", "labels")
```
**6**. Set the dataset format according to the machine learning framework you're using.
<frameworkcontent>
<pt>
Use the [`~Dataset.set_format`] function to set the dataset format to `torch` and specify the columns you want to format. This function applies formatting on-the-fly. After converting to PyTorch tensors, wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader):
```py
>>> from torch.utils.data import DataLoader
>>> dataset.set_format(type="torch", columns=["input_values", "labels"])
>>> dataloader = DataLoader(dataset, batch_size=4)
```
</pt>
<tf>
Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from π€ Transformers to prepare the dataset to be compatible with
TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset`
with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification.
```py
>>> import tensorflow as tf
>>> tf_dataset = model.prepare_tf_dataset(
... dataset,
... batch_size=4,
... shuffle=True,
... )
```
</tf>
</frameworkcontent>
**7**. Start training with your machine learning framework! Check out the π€ Transformers [audio classification guide](https://huggingface.co/docs/transformers/tasks/audio_classification) for an end-to-end example of how to train a model on an audio dataset.
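For instance, a bare-bones PyTorch training loop over the `dataloader` from step 6 could look like the sketch below. The learning rate and the idea of re-loading the model with `num_labels` set to the number of intent classes are illustrative choices, not part of the original example:
```py
>>> from torch.optim import AdamW
>>> from transformers import AutoModelForAudioClassification
>>> # illustrative: re-load the model so the classification head has one output per intent class
>>> num_labels = dataset.features["labels"].num_classes
>>> model = AutoModelForAudioClassification.from_pretrained("facebook/wav2vec2-base", num_labels=num_labels)
>>> optimizer = AdamW(model.parameters(), lr=3e-5)
>>> model.train()
>>> for batch in dataloader:
...     outputs = model(input_values=batch["input_values"], labels=batch["labels"])
...     outputs.loss.backward()
...     optimizer.step()
...     optimizer.zero_grad()
```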
## Vision
Image datasets are loaded just like text datasets. However, instead of a tokenizer, you'll need a [feature extractor](https://huggingface.co/docs/transformers/main_classes/feature_extractor#feature-extractor) to preprocess the dataset. Applying data augmentation to an image is common in computer vision to make the model more robust against overfitting. You're free to use any data augmentation library you want, and then you can apply the augmentations with π€ Datasets. In this quickstart, you'll load the [Beans](https://huggingface.co/datasets/beans) dataset and get it ready for the model to train on and identify disease from the leaf images.
**1**. Load the Beans dataset by providing the [`load_dataset`] function with the dataset name and a dataset split:
```py
>>> from datasets import load_dataset, Image
>>> dataset = load_dataset("beans", split="train")
```
**2**. Now you can add some data augmentations with any library ([Albumentations](https://albumentations.ai/), [imgaug](https://imgaug.readthedocs.io/en/latest/), [Kornia](https://kornia.readthedocs.io/en/latest/)) you like. Here, you'll use [torchvision](https://pytorch.org/vision/stable/transforms.html) to randomly change the color properties of an image:
```py
>>> from torchvision.transforms import Compose, ColorJitter, ToTensor
>>> jitter = Compose(
... [ColorJitter(brightness=0.5, hue=0.5), ToTensor()]
... )
```
**3**. Create a function to apply your transform to the dataset and generate the model input: `pixel_values`.
```python
>>> def transforms(examples):
... examples["pixel_values"] = [jitter(image.convert("RGB")) for image in examples["image"]]
... return examples
```
**4**. Use the [`~Dataset.with_transform`] function to apply the data augmentations on-the-fly:
```py
>>> dataset = dataset.with_transform(transforms)
```
**5**. Set the dataset format according to the machine learning framework you're using.
<frameworkcontent>
<pt>
Wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader). You'll also need to create a collate function to collate the samples into batches:
```py
>>> from torch.utils.data import DataLoader
>>> def collate_fn(examples):
... images = []
... labels = []
... for example in examples:
... images.append((example["pixel_values"]))
... labels.append(example["labels"])
...
... pixel_values = torch.stack(images)
... labels = torch.tensor(labels)
... return {"pixel_values": pixel_values, "labels": labels}
>>> dataloader = DataLoader(dataset, collate_fn=collate_fn, batch_size=4)
```
</pt>
<tf>
Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from π€ Transformers to prepare the dataset to be compatible with
TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset`
with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification.
Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed:
```bash
pip install -U albumentations opencv-python
```
```py
>>> import albumentations
>>> import numpy as np
>>> transform = albumentations.Compose([
... albumentations.RandomCrop(width=256, height=256),
... albumentations.HorizontalFlip(p=0.5),
... albumentations.RandomBrightnessContrast(p=0.2),
... ])
>>> def transforms(examples):
... examples["pixel_values"] = [
... transform(image=np.array(image))["image"] for image in examples["image"]
... ]
... return examples
>>> dataset.set_transform(transforms)
>>> tf_dataset = model.prepare_tf_dataset(
... dataset,
... batch_size=4,
... shuffle=True,
... )
```
</tf>
</frameworkcontent>
**6**. Start training with your machine learning framework! Check out the π€ Transformers [image classification guide](https://huggingface.co/docs/transformers/tasks/image_classification) for an end-to-end example of how to train a model on an image dataset.
## NLP
Text needs to be tokenized into individual tokens by a [tokenizer](https://huggingface.co/docs/transformers/main_classes/tokenizer). For the quickstart, you'll load the [Microsoft Research Paraphrase Corpus (MRPC)](https://huggingface.co/datasets/glue/viewer/mrpc) training dataset to train a model to determine whether a pair of sentences mean the same thing.
**1**. Load the MRPC dataset by providing the [`load_dataset`] function with the dataset name, dataset configuration (not all datasets will have a configuration), and dataset split:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("glue", "mrpc", split="train")
```
**2**. Next, load a pretrained [BERT](https://huggingface.co/bert-base-uncased) model and its corresponding tokenizer from the [π€ Transformers](https://huggingface.co/transformers/) library. It is totally normal to see a warning after you load the model about some weights not being initialized. This is expected because you are loading this model checkpoint for training with another task.
```py
>>> from transformers import AutoModelForSequenceClassification, AutoTokenizer
>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
===PT-TF-SPLIT===
>>> from transformers import TFAutoModelForSequenceClassification, AutoTokenizer
>>> model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
```
**3**. Create a function to tokenize the dataset, and you should also truncate and pad the text into tidy rectangular tensors. The tokenizer generates three new columns in the dataset: `input_ids`, `token_type_ids`, and an `attention_mask`. These are the model inputs.
Use the [`~Dataset.map`] function to speed up processing by applying your tokenization function to batches of examples in the dataset:
```py
>>> def encode(examples):
... return tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, padding="max_length")
>>> dataset = dataset.map(encode, batched=True)
>>> dataset[0]
{'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
'label': 1,
'idx': 0,
'input_ids': array([ 101, 7277, 2180, 5303, 4806, 1117, 1711, 117, 2292, 1119, 1270, 107, 1103, 7737, 107, 117, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102, 11336, 6732, 3384, 1106, 1140, 1112, 1178, 107, 1103, 7737, 107, 117, 7277, 2180, 5303, 4806, 1117, 1711, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102]),
'token_type_ids': array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
'attention_mask': array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])}
```
**4**. Rename the `label` column to `labels`, which is the expected input name in [BertForSequenceClassification](https://huggingface.co/docs/transformers/main/en/model_doc/bert#transformers.BertForSequenceClassification):
```py
>>> dataset = dataset.map(lambda examples: {"labels": examples["label"]}, batched=True)
```
**5**. Set the dataset format according to the machine learning framework you're using.
<frameworkcontent>
<pt>
Use the [`~Dataset.set_format`] function to set the dataset format to `torch` and specify the columns you want to format. This function applies formatting on-the-fly. After converting to PyTorch tensors, wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader):
```py
>>> import torch
>>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "labels"])
>>> dataloader = torch.utils.data.DataLoader(dataset, batch_size=32)
```
</pt>
<tf>
Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from π€ Transformers to prepare the dataset to be compatible with
TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset`
with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification.
```py
>>> import tensorflow as tf
>>> tf_dataset = model.prepare_tf_dataset(
... dataset,
... batch_size=4,
... shuffle=True,
... )
```
</tf>
</frameworkcontent>
**6**. Start training with your machine learning framework! Check out the π€ Transformers [text classification guide](https://huggingface.co/docs/transformers/tasks/sequence_classification) for an end-to-end example of how to train a model on a text dataset.
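As a quick illustration, here is a minimal PyTorch training loop over the `dataloader` from step 5. The optimizer and learning rate are arbitrary choices for this sketch, not part of the original example:
```py
>>> from torch.optim import AdamW
>>> optimizer = AdamW(model.parameters(), lr=2e-5)
>>> model.train()
>>> for batch in dataloader:
...     # each batch already contains input_ids, token_type_ids, attention_mask and labels
...     outputs = model(**batch)
...     outputs.loss.backward()
...     optimizer.step()
...     optimizer.zero_grad()
```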
## What's next?
This completes the π€ Datasets quickstart! You can load any text, audio, or image dataset with a single function and get it ready for your model to train on.
For your next steps, take a look at our [How-to guides](./how_to) and learn how to do more specific things like loading different dataset formats, aligning labels, and streaming large datasets. If you're interested in learning more about π€ Datasets core concepts, grab a cup of coffee and read our [Conceptual Guides](./about_arrow)!
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/audio_dataset.mdx | # Create an audio dataset
You can share a dataset with your team or with anyone in the community by creating a dataset repository on the Hugging Face Hub:
```py
from datasets import load_dataset
dataset = load_dataset("<username>/my_dataset")
```
There are several methods for creating and sharing an audio dataset:
* Create an audio dataset from local files in python with [`Dataset.push_to_hub`]. This is an easy way that requires only a few steps in python.
* Create an audio dataset repository with the `AudioFolder` builder. This is a no-code solution for quickly creating an audio dataset with several thousand audio files.
* Create an audio dataset by writing a loading script. This method is for advanced users and requires more effort and coding, but you have greater flexibility over how a dataset is defined, downloaded, and generated which can be useful for more complex or large scale audio datasets.
<Tip>
You can control access to your dataset by requiring users to share their contact information first. Check out the [Gated datasets](https://huggingface.co/docs/hub/datasets-gated) guide for more information about how to enable this feature on the Hub.
</Tip>
## Local files
You can load your own dataset using the paths to your audio files. Use the [`~Dataset.cast_column`] function to take a column of audio file paths, and cast it to the [`Audio`] feature:
```py
>>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"]}).cast_column("audio", Audio())
>>> audio_dataset[0]["audio"]
{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414,
0. , 0. ], dtype=float32),
'path': 'path/to/audio_1',
'sampling_rate': 16000}
```
Then upload the dataset to the Hugging Face Hub using [`Dataset.push_to_hub`]:
```py
audio_dataset.push_to_hub("<username>/my_dataset")
```
This will create a dataset repository containing your audio dataset:
```
my_dataset/
βββ README.md
βββ data/
βββ train-00000-of-00001.parquet
```
## AudioFolder
The `AudioFolder` is a dataset builder designed to quickly load an audio dataset with several thousand audio files without requiring you to write any code.
Any additional information about your dataset - such as transcription, speaker accent, or speaker intent - is automatically loaded by `AudioFolder` as long as you include this information in a metadata file (`metadata.csv`/`metadata.jsonl`).
<Tip>
π‘ Take a look at the [Split pattern hierarchy](repository_structure#split-pattern-hierarchy) to learn more about how `AudioFolder` creates dataset splits based on your dataset repository structure.
</Tip>
Create a dataset repository on the Hugging Face Hub and upload your dataset directory following the `AudioFolder` structure:
```
my_dataset/
βββ README.md
βββ metadata.csv
βββ data/
```
The `data` folder can be any name you want.
<Tip>
It can be helpful to store your metadata as a `jsonl` file if the data columns contain a more complex format (like a list of floats) to avoid parsing errors or reading complex values as strings.
</Tip>
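For example, a `metadata.jsonl` file holds one JSON object per line, which keeps list-valued columns intact. The file names and the extra column below are placeholders for illustration:
```jsonl
{"file_name": "data/first_audio_file.mp3", "transcription": "first transcription", "pitch_values": [0.25, -0.13, 0.71]}
{"file_name": "data/second_audio_file.mp3", "transcription": "second transcription", "pitch_values": [0.02, 0.38, -0.49]}
```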
The metadata file should include a `file_name` column to link an audio file to its metadata:
```csv
file_name,transcription
data/first_audio_file.mp3,znowu się duch z ciałem zrośnie w młodocianej wstaniesz wiosnie i możesz skutkiem tych leków umierać wstawać wiek wieków dalej tam były przestrogi jak siekać głowę jak nogi
data/second_audio_file.mp3,już u źwierzyńca podwojów król zasiada przy nim książęta i panowie rada a gdzie wzniosły krążył ganek rycerze obok kochanek król skinął palcem zaczęto igrzysko
data/third_audio_file.mp3,pewnie kędyś w obłędzie ubite minęły szlaki zaczekajmy dzień jaki poślemy szukać wszędzie dziś jutro pewnie będzie posłali wszędzie sługi czekali dzień i drugi gdy nic nie doczekali z płaczem chcą jechać dali
```
Then you can store your dataset in a directory structure like this:
```
metadata.csv
data/first_audio_file.mp3
data/second_audio_file.mp3
data/third_audio_file.mp3
```
Users can now load your dataset and the associated metadata by specifying `audiofolder` in [`load_dataset`] and the dataset directory in `data_dir`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("audiofolder", data_dir="/path/to/data")
>>> dataset["train"][0]
{'audio':
{'path': '/path/to/extracted/audio/first_audio_file.mp3',
'array': array([ 0.00088501, 0.0012207 , 0.00131226, ..., -0.00045776, -0.00054932, -0.00054932], dtype=float32),
'sampling_rate': 16000},
'transcription': 'znowu się duch z ciałem zrośnie w młodocianej wstaniesz wiosnie i możesz skutkiem tych leków umierać wstawać wiek wieków dalej tam były przestrogi jak siekać głowę jak nogi'
}
```
You can also use `audiofolder` to load datasets involving multiple splits. To do so, your dataset directory might have the following structure:
```
data/train/first_train_audio_file.mp3
data/train/second_train_audio_file.mp3
data/test/first_test_audio_file.mp3
data/test/second_test_audio_file.mp3
```
<Tip warning={true}>
Note that if the audio files are not located right next to the metadata file, the `file_name` column should contain the full relative path to each audio file, not just its filename.
</Tip>
For audio datasets that don't have any associated metadata, `AudioFolder` automatically infers the class labels of the dataset based on the directory name. It might be useful for audio classification tasks. Your dataset directory might look like:
```
data/train/electronic/01.mp3
data/train/punk/01.mp3
data/test/electronic/09.mp3
data/test/punk/09.mp3
```
Load the dataset with `AudioFolder`, and it will create a `label` column from the directory name (the genre, in this case):
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("audiofolder", data_dir="/path/to/data")
>>> dataset["train"][0]
{'audio':
{'path': '/path/to/electronic/01.mp3',
'array': array([ 3.9714024e-07, 7.3031038e-07, 7.5640685e-07, ...,
-1.1963668e-01, -1.1681189e-01, -1.1244172e-01], dtype=float32),
'sampling_rate': 44100},
'label': 0 # "electronic"
}
>>> dataset["train"][-1]
{'audio':
{'path': '/path/to/punk/01.mp3',
'array': array([0.15237972, 0.13222949, 0.10627693, ..., 0.41940814, 0.37578005,
0.33717662], dtype=float32),
'sampling_rate': 44100},
'label': 1 # "punk"
}
```
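To turn the integer back into a human-readable class name, you can use the [`ClassLabel.int2str`] method, as in this small snippet based on the example above:
```py
>>> labels = dataset["train"].features["label"]
>>> labels.int2str(dataset["train"][0]["label"])
'electronic'
```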
<Tip warning={true}>
If all audio files are contained in a single directory, or if they are not all on the same level of the directory structure, the `label` column won't be added automatically. If you need it, set `drop_labels=False` explicitly.
</Tip>
<Tip>
Some audio datasets, like those found in [Kaggle competitions](https://www.kaggle.com/competitions/kaggle-pog-series-s01e02/overview), have separate metadata files for each split. Provided the metadata features are the same for each split, `audiofolder` can be used to load all splits at once. If the metadata features differ across each split, you should load them with separate `load_dataset()` calls.
</Tip>
## Loading script
Write a dataset loading script to manually create a dataset.
It defines a dataset's splits and configurations, and handles downloading and generating the dataset examples.
The script should have the same name as your dataset folder or repository:
```
my_dataset/
βββ README.md
βββ my_dataset.py
βββ data/
```
The `data` folder can be any name you want, it doesn't have to be `data`. This folder is optional, unless you're hosting your dataset on the Hub.
This directory structure allows your dataset to be loaded in one line:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("path/to/my_dataset")
```
This guide will show you how to create a dataset loading script for audio datasets, which is a bit different from <a class="underline decoration-green-400 decoration-2 font-semibold" href="./dataset_script">creating a loading script for text datasets</a>.
Audio datasets are commonly stored in `tar.gz` archives which requires a particular approach to support streaming mode. While streaming is not required, we highly encourage implementing streaming support in your audio dataset because:
1. Users without a lot of disk space can use your dataset without downloading it. Learn more about streaming in the [Stream](./stream) guide!
2. Users can preview a dataset in the dataset viewer.
Here is an example using TAR archives:
```
my_dataset/
βββ README.md
βββ my_dataset.py
βββ data/
βββ train.tar.gz
βββ test.tar.gz
βββ metadata.csv
```
In addition to learning how to create a streamable dataset, you'll also learn how to:
* Create a dataset builder class.
* Create dataset configurations.
* Add dataset metadata.
* Download and define the dataset splits.
* Generate the dataset.
* Upload the dataset to the Hub.
The best way to learn is to open up an existing audio dataset loading script, like [Vivos](https://huggingface.co/datasets/vivos/blob/main/vivos.py), and follow along!
<Tip warning={true}>
This guide shows how to process audio data stored in TAR archives - the most frequent case for audio datasets. Check out [minds14](https://huggingface.co/datasets/PolyAI/minds14/blob/main/minds14.py) dataset for an example of an audio script which uses ZIP archives.
</Tip>
<Tip>
To help you get started, we created a loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py) you can copy and use as a starting point!
</Tip>
### Create a dataset builder class
[`GeneratorBasedBuilder`] is the base class for datasets generated from a dictionary generator. Within this class, there are three methods to help create your dataset:
* `_info` stores information about your dataset like its description, license, and features.
* `_split_generators` downloads the dataset and defines its splits.
* `_generate_examples` generates the dataset's samples containing the audio data and other features specified in `info` for each split.
Start by creating your dataset class as a subclass of [`GeneratorBasedBuilder`] and add the three methods. Don't worry about filling in each of these methods yet, you'll develop those over the next few sections:
```py
class VivosDataset(datasets.GeneratorBasedBuilder):
"""VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recording speech prepared for
Vietnamese Automatic Speech Recognition task."""
def _info(self):
def _split_generators(self, dl_manager):
def _generate_examples(self, prompts_path, path_to_clips, audio_files):
```
#### Multiple configurations
In some cases, a dataset may have more than one configuration. For example, [LibriVox Indonesia](https://huggingface.co/datasets/indonesian-nlp/librivox-indonesia) dataset has several configurations corresponding to different languages.
To create different configurations, use the [`BuilderConfig`] class to create a subclass of your dataset. The only required parameter is the `name` of the configuration, which must be passed to the configuration's superclass `__init__()`. Otherwise, you can specify any custom parameters you want in your configuration class.
```py
class LibriVoxIndonesiaConfig(datasets.BuilderConfig):
"""BuilderConfig for LibriVoxIndonesia."""
def __init__(self, name, version, **kwargs):
self.language = kwargs.pop("language", None)
self.release_date = kwargs.pop("release_date", None)
self.num_clips = kwargs.pop("num_clips", None)
self.num_speakers = kwargs.pop("num_speakers", None)
self.validated_hr = kwargs.pop("validated_hr", None)
self.total_hr = kwargs.pop("total_hr", None)
self.size_bytes = kwargs.pop("size_bytes", None)
self.size_human = size_str(self.size_bytes)
description = (
f"LibriVox-Indonesia speech to text dataset in {self.language} released on {self.release_date}. "
f"The dataset comprises {self.validated_hr} hours of transcribed speech data"
)
super(LibriVoxIndonesiaConfig, self).__init__(
name=name,
version=datasets.Version(version),
description=description,
**kwargs,
)
```
Define your configurations in the `BUILDER_CONFIGS` class variable inside [`GeneratorBasedBuilder`]. In this example, the author imports the languages from a separate `release_stats.py` [file](https://huggingface.co/datasets/indonesian-nlp/librivox-indonesia/blob/main/release_stats.py) from their repository, and then loops through each language to create a configuration:
```py
class LibriVoxIndonesia(datasets.GeneratorBasedBuilder):
DEFAULT_CONFIG_NAME = "all"
BUILDER_CONFIGS = [
LibriVoxIndonesiaConfig(
name=lang,
version=STATS["version"],
language=LANGUAGES[lang],
release_date=STATS["date"],
num_clips=lang_stats["clips"],
num_speakers=lang_stats["users"],
total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
)
for lang, lang_stats in STATS["locales"].items()
]
```
<Tip>
Typically, users need to specify a configuration to load in [`load_dataset`], otherwise a `ValueError` is raised. You can avoid this by setting a default dataset configuration to load in `DEFAULT_CONFIG_NAME`.
</Tip>
Now if users want to load the Balinese (`bal`) configuration, they can use the configuration name:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("indonesian-nlp/librivox-indonesia", "bal", split="train")
```
### Add dataset metadata
Adding information about your dataset helps users to learn more about it. This information is stored in the [`DatasetInfo`] class which is returned by the `info` method. Users can access this information by:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder("vivos")
>>> ds_builder.info
```
There is a lot of information you can include about your dataset, but some important ones are:
1. `description` provides a concise description of the dataset.
2. `features` specifies the dataset column types. Since you're creating an audio loading script, you'll need to include the [`Audio`] feature and the `sampling_rate` of the dataset.
3. `homepage` provides a link to the dataset homepage.
4. `license` specifies the permissions for using a dataset as defined by the license type.
5. `citation` is a BibTeX citation of the dataset.
<Tip>
You'll notice a lot of the dataset information is defined earlier in the loading script which can make it easier to read. There are also other [`~Dataset.Features`] you can input, so be sure to check out the full list and [features guide](./about_dataset_features) for more details.
</Tip>
```py
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"speaker_id": datasets.Value("string"),
"path": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=16_000),
"sentence": datasets.Value("string"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
```
### Download and define the dataset splits
Now that you've added some information about your dataset, the next step is to download the dataset and define the splits.
1. Use the [`~DownloadManager.download`] method to download metadata file at `_PROMPTS_URLS` and audio TAR archive at `_DATA_URL`. This method returns the path to the local file/archive. In streaming mode, it doesn't download the file(s) and just returns a URL to stream the data from. This method accepts:
* a relative path to a file inside a Hub dataset repository (for example, in the `data/` folder)
* a URL to a file hosted somewhere else
* a (nested) list or dictionary of file names or URLs
2. After you've downloaded the dataset, use the [`SplitGenerator`] to organize the audio files and sentence prompts in each split. Name each split with a standard name like: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
In the `gen_kwargs` parameter, specify the file path to the `prompts_path` and `path_to_clips`. For `audio_files`, you'll need to use [`~DownloadManager.iter_archive`] to iterate over the audio files in the TAR archive. This enables streaming for your dataset. All of these file paths are passed onto the next step where you'll actually generate the dataset.
```py
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
prompts_paths = dl_manager.download(_PROMPTS_URLS)
archive = dl_manager.download(_DATA_URL)
train_dir = "vivos/train"
test_dir = "vivos/test"
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"prompts_path": prompts_paths["train"],
"path_to_clips": train_dir + "/waves",
"audio_files": dl_manager.iter_archive(archive),
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"prompts_path": prompts_paths["test"],
"path_to_clips": test_dir + "/waves",
"audio_files": dl_manager.iter_archive(archive),
},
),
]
```
<Tip warning={true}>
This implementation does not extract downloaded archives. If you want to extract files after download, you need to additionally use [`~DownloadManager.extract`], see the [(Advanced) Extract TAR archives](#advanced-extract-tar-archives-locally) section.
</Tip>
### Generate the dataset
The last method in the [`GeneratorBasedBuilder`] class actually generates the samples in the dataset. It yields a dataset according to the structure specified in `features` from the `info` method. As you can see, `_generate_examples` accepts the `prompts_path`, `path_to_clips`, and `audio_files` from the previous method as arguments.
Files inside TAR archives are accessed and yielded sequentially. This means you need to have the metadata associated with the audio files in the TAR file in hand first so you can yield it with its corresponding audio file.
```py
examples = {}
with open(prompts_path, encoding="utf-8") as f:
for row in f:
data = row.strip().split(" ", 1)
speaker_id = data[0].split("_")[0]
audio_path = "/".join([path_to_clips, speaker_id, data[0] + ".wav"])
examples[audio_path] = {
"speaker_id": speaker_id,
"path": audio_path,
"sentence": data[1],
}
```
Finally, iterate over files in `audio_files` and yield them along with their corresponding metadata. [`~DownloadManager.iter_archive`] yields a tuple of (`path`, `f`) where `path` is a **relative** path to a file inside TAR archive and `f` is a file object itself.
```py
inside_clips_dir = False
id_ = 0
for path, f in audio_files:
if path.startswith(path_to_clips):
inside_clips_dir = True
if path in examples:
audio = {"path": path, "bytes": f.read()}
yield id_, {**examples[path], "audio": audio}
id_ += 1
elif inside_clips_dir:
break
```
Put these two steps together, and the whole `_generate_examples` method looks like:
```py
def _generate_examples(self, prompts_path, path_to_clips, audio_files):
"""Yields examples as (key, example) tuples."""
examples = {}
with open(prompts_path, encoding="utf-8") as f:
for row in f:
data = row.strip().split(" ", 1)
speaker_id = data[0].split("_")[0]
audio_path = "/".join([path_to_clips, speaker_id, data[0] + ".wav"])
examples[audio_path] = {
"speaker_id": speaker_id,
"path": audio_path,
"sentence": data[1],
}
inside_clips_dir = False
id_ = 0
for path, f in audio_files:
if path.startswith(path_to_clips):
inside_clips_dir = True
if path in examples:
audio = {"path": path, "bytes": f.read()}
yield id_, {**examples[path], "audio": audio}
id_ += 1
elif inside_clips_dir:
break
```
### Upload the dataset to the Hub
Once your script is ready, [create a dataset card](./dataset_card) and [upload it to the Hub](./share).
Congratulations, you can now load your dataset from the Hub! π₯³
```py
>>> from datasets import load_dataset
>>> load_dataset("<username>/my_dataset")
```
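If you prefer to create the repository and upload the files from Python instead of the web interface, a minimal sketch using the `huggingface_hub` client could look like this (the repository name and local folder are placeholders, and it assumes you are already logged in):
```py
>>> from huggingface_hub import HfApi
>>> api = HfApi()
>>> api.create_repo("<username>/my_dataset", repo_type="dataset", exist_ok=True)
>>> api.upload_folder(folder_path="my_dataset", repo_id="<username>/my_dataset", repo_type="dataset")
```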
### (Advanced) Extract TAR archives locally
In the example above, the downloaded archives are not extracted, and therefore the examples do not contain information about where the files are stored locally.
To explain how to do the extraction in a way that it also supports streaming, we will briefly go through the [LibriVox Indonesia](https://huggingface.co/datasets/indonesian-nlp/librivox-indonesia/blob/main/librivox-indonesia.py) loading script.
#### Download and define the dataset splits
1. Use the [`~DownloadManager.download`] method to download the audio data at `_AUDIO_URL`.
2. To extract audio TAR archive locally, use the [`~DownloadManager.extract`]. You can use this method only in non-streaming mode (when `dl_manager.is_streaming=False`). This returns a local path to the extracted archive directory:
```py
local_extracted_archive = dl_manager.extract(audio_path) if not dl_manager.is_streaming else None
```
3. Use the [`~DownloadManager.iter_archive`] method to iterate over the archive at `audio_path`, just like in the Vivos example above. [`~DownloadManager.iter_archive`] doesn't provide any information about the full paths of files from the archive, even if it has been extracted. As a result, you need to pass the `local_extracted_archive` path to the next step in `gen_kwargs`, in order to preserve information about where the archive was extracted to. This is required to construct the correct paths to the local files when you generate the examples.
<Tip warning={true}>
The reason you need to use a combination of [`~DownloadManager.download`] and [`~DownloadManager.iter_archive`] is because files in TAR archives can't be accessed directly by their paths. Instead, you'll need to iterate over the files within the archive! You can use [`~DownloadManager.download_and_extract`] and [`~DownloadManager.extract`] with TAR archives only in non-streaming mode, otherwise it would throw an error.
</Tip>
4. Use the [`~DownloadManager.download_and_extract`] method to download the metadata file specified in `_METADATA_URL`. This method returns a path to a local file in non-streaming mode. In streaming mode, it doesn't download the file locally and just returns the same URL.
5. Now use the [`SplitGenerator`] to organize the audio files and metadata in each split. Name each split with a standard name like: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
In the `gen_kwargs` parameter, specify the file paths to `local_extracted_archive`, `audio_files`, `metadata_path`, and `path_to_clips`. Remember, for `audio_files`, you need to use [`~DownloadManager.iter_archive`] to iterate over the audio files in the TAR archives. This enables streaming for your dataset! All of these file paths are passed onto the next step where the dataset samples are generated.
```py
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_manager.download_config.ignore_url_params = True
audio_path = dl_manager.download(_AUDIO_URL)
local_extracted_archive = dl_manager.extract(audio_path) if not dl_manager.is_streaming else None
path_to_clips = "librivox-indonesia"
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"local_extracted_archive": local_extracted_archive,
"audio_files": dl_manager.iter_archive(audio_path),
"metadata_path": dl_manager.download_and_extract(_METADATA_URL + "/metadata_train.csv.gz"),
"path_to_clips": path_to_clips,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"local_extracted_archive": local_extracted_archive,
"audio_files": dl_manager.iter_archive(audio_path),
"metadata_path": dl_manager.download_and_extract(_METADATA_URL + "/metadata_test.csv.gz"),
"path_to_clips": path_to_clips,
},
),
]
```
#### Generate the dataset
Here `_generate_examples` accepts `local_extracted_archive`, `audio_files`, `metadata_path`, and `path_to_clips` from the previous method as arguments.
1. TAR files are accessed and yielded sequentially. This means you need to read the metadata in `metadata_path` associated with the audio files in the TAR file first, so that you can later yield it with its corresponding audio file:
```py
with open(metadata_path, "r", encoding="utf-8") as f:
reader = csv.DictReader(f)
for row in reader:
if self.config.name == "all" or self.config.name == row["language"]:
row["path"] = os.path.join(path_to_clips, row["path"])
# if data is incomplete, fill with empty values
for field in data_fields:
if field not in row:
row[field] = ""
metadata[row["path"]] = row
```
2. Now you can yield the files from the `audio_files` archive. When you use [`~DownloadManager.iter_archive`], it yields a tuple of (`path`, `f`) where `path` is a **relative path** to a file inside the archive, and `f` is the file object itself. To get the **full path** to the locally extracted file, join the path of the directory (`local_extracted_archive`) where the archive is extracted to and the relative audio file path (`path`):
```py
for path, f in audio_files:
if path in metadata:
result = dict(metadata[path])
# set the audio feature and the path to the extracted file
path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
result["audio"] = {"path": path, "bytes": f.read()}
result["path"] = path
yield id_, result
id_ += 1
```
Put both of these steps together, and the whole `_generate_examples` method should look like:
```py
def _generate_examples(
self,
local_extracted_archive,
audio_files,
metadata_path,
path_to_clips,
):
"""Yields examples."""
data_fields = list(self._info().features.keys())
metadata = {}
with open(metadata_path, "r", encoding="utf-8") as f:
reader = csv.DictReader(f)
for row in reader:
if self.config.name == "all" or self.config.name == row["language"]:
row["path"] = os.path.join(path_to_clips, row["path"])
# if data is incomplete, fill with empty values
for field in data_fields:
if field not in row:
row[field] = ""
metadata[row["path"]] = row
id_ = 0
for path, f in audio_files:
if path in metadata:
result = dict(metadata[path])
# set the audio feature and the path to the extracted file
path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
result["audio"] = {"path": path, "bytes": f.read()}
result["path"] = path
yield id_, result
id_ += 1
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/metrics.mdx | # Evaluate predictions
<Tip warning={true}>
Metrics is deprecated in π€ Datasets. To learn more about how to use metrics, take a look at the library π€ [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
</Tip>
π€ Datasets provides various common and NLP-specific [metrics](https://huggingface.co/metrics) for you to measure your model's performance. In this section of the tutorials, you will load a metric and use it to evaluate your model's predictions.
You can see what metrics are available with [`list_metrics`]:
```py
>>> from datasets import list_metrics
>>> metrics_list = list_metrics()
>>> len(metrics_list)
28
>>> print(metrics_list)
['accuracy', 'bertscore', 'bleu', 'bleurt', 'cer', 'comet', 'coval', 'cuad', 'f1', 'gleu', 'glue', 'indic_glue', 'matthews_correlation', 'meteor', 'pearsonr', 'precision', 'recall', 'rouge', 'sacrebleu', 'sari', 'seqeval', 'spearmanr', 'squad', 'squad_v2', 'super_glue', 'wer', 'wiki_split', 'xnli']
```
## Load metric
It is very easy to load a metric with π€ Datasets. In fact, you will notice that it is very similar to loading a dataset! Load a metric from the Hub with [`load_metric`]:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc')
```
This will load the metric associated with the MRPC dataset from the GLUE benchmark.
## Select a configuration
If you are using a benchmark dataset, you need to select a metric that is associated with the configuration you are using. Select a metric configuration by providing the configuration name:
```py
>>> metric = load_metric('glue', 'mrpc')
```
## Metrics object
Before you begin using a [`Metric`] object, you should get to know it a little better. As with a dataset, you can return some basic information about a metric. For example, access the `inputs_description` parameter in [`datasets.MetricInfo`] to get more information about a metric's expected input format and some usage examples:
```py
>>> print(metric.inputs_description)
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
...
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
...
```
Notice that for the MRPC configuration, the metric expects the predictions and references to be zeros and ones. For a complete list of attributes you can return with your metric, take a look at [`MetricInfo`].
## Compute metric
Once you have loaded a metric, you are ready to use it to evaluate a model's predictions. Provide the model predictions and references to [`~datasets.Metric.compute`]:
```py
>>> model_predictions = model(model_inputs)
>>> final_score = metric.compute(predictions=model_predictions, references=gold_references)
```
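For example, with toy predictions and references standing in for real model outputs (the values below are purely illustrative), the MRPC metric returns both accuracy and F1:
```py
>>> metric = load_metric('glue', 'mrpc')
>>> predictions = [0, 1, 1, 0]
>>> references = [0, 1, 0, 0]
>>> metric.compute(predictions=predictions, references=references)
{'accuracy': 0.75, 'f1': 0.6666666666666666}
```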
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/image_classification.mdx | # Image classification
Image classification datasets are used to train a model to classify an entire image. There are a wide variety of applications enabled by these datasets such as identifying endangered wildlife species or screening for disease in medical images. This guide will show you how to apply transformations to an image classification dataset.
Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed:
```bash
pip install -U albumentations opencv-python
```
This guide uses the [Beans](https://huggingface.co/datasets/beans) dataset for identifying the type of bean plant disease based on an image of its leaf.
Load the dataset and take a look at an example:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("beans")
>>> dataset["train"][10]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x7F8D2F4D7A10>,
'image_file_path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/angular_leaf_spot/angular_leaf_spot_train.204.jpg',
'labels': 0}
```
The dataset has three fields:
* `image`: a PIL image object.
* `image_file_path`: the path to the image file.
* `labels`: the label or category of the image.
Next, check out an image:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf.png">
</div>
Now apply some augmentations with `albumentations`. You'll randomly crop the image, flip it horizontally, and adjust its brightness.
```py
>>> import cv2
>>> import albumentations
>>> import numpy as np
>>> transform = albumentations.Compose([
... albumentations.RandomCrop(width=256, height=256),
... albumentations.HorizontalFlip(p=0.5),
... albumentations.RandomBrightnessContrast(p=0.2),
... ])
```
Create a function to apply the transformation to the images:
```py
>>> def transforms(examples):
... examples["pixel_values"] = [
... transform(image=np.array(image))["image"] for image in examples["image"]
... ]
...
... return examples
```
Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space:
```py
>>> dataset.set_transform(transforms)
```
You can verify the transformation worked by indexing into the `pixel_values` of the first example:
```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> img = dataset["train"][0]["pixel_values"]
>>> plt.imshow(img)
```
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png"/>
</div>
<Tip>
Now that you know how to process a dataset for image classification, learn
[how to train an image classification model](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)
and use it for inference.
</Tip> | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/access.mdx | # Know your dataset
There are two types of dataset objects, a regular [`Dataset`] and then an β¨ [`IterableDataset`] β¨. A [`Dataset`] provides fast random access to the rows, and memory-mapping so that loading even large datasets only uses a relatively small amount of device memory. But for really, really big datasets that won't even fit on disk or in memory, an [`IterableDataset`] allows you to access and use the dataset without waiting for it to download completely!
This tutorial will show you how to load and access a [`Dataset`] and an [`IterableDataset`].
## Dataset
When you load a dataset split, you'll get a [`Dataset`] object. You can do many things with a [`Dataset`] object, which is why it's important to learn how to manipulate and interact with the data stored inside.
This tutorial uses the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset, but feel free to load any dataset you'd like and follow along!
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("rotten_tomatoes", split="train")
```
### Indexing
A [`Dataset`] contains columns of data, and each column can be a different type of data. The *index*, or axis label, is used to access examples from the dataset. For example, indexing by the row returns a dictionary of an example from the dataset:
```py
# Get the first row in the dataset
>>> dataset[0]
{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
Use the `-` operator to start from the end of the dataset:
```py
# Get the last row in the dataset
>>> dataset[-1]
{'label': 0,
'text': 'things really get weird , though not particularly scary : the movie is all portent and no content .'}
```
Indexing by the column name returns a list of all the values in the column:
```py
>>> dataset["text"]
['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'effective but too-tepid biopic',
...,
'things really get weird , though not particularly scary : the movie is all portent and no content .']
```
You can combine row and column name indexing to return a specific value at a position:
```py
>>> dataset[0]["text"]
'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'
```
But it is important to remember that indexing order matters, especially when working with large audio and image datasets. Indexing by the column name returns all the values in the column first, then loads the value at that position. For large datasets, it may be slower to index by the column name first.
```py
>>> from codetiming import Timer  # assuming a simple timing utility such as codetiming
>>> with Timer():
... dataset[0]['text']
Elapsed time: 0.0031 seconds
>>> with Timer():
... dataset["text"][0]
Elapsed time: 0.0094 seconds
```
### Slicing
Slicing returns a slice - or subset - of the dataset, which is useful for viewing several rows at once. To slice a dataset, use the `:` operator to specify a range of positions.
```py
# Get the first three rows
>>> dataset[:3]
{'label': [1, 1, 1],
'text': ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'effective but too-tepid biopic']}
# Get rows between three and six
>>> dataset[3:6]
{'label': [1, 1, 1],
'text': ['if you sometimes like to go to the movies to have fun , wasabi is a good place to start .',
"emerges as something rare , an issue movie that's so honest and keenly observed that it doesn't feel like one .",
'the film provides some great insight into the neurotic mindset of all comics -- even those who have reached the absolute top of the game .']}
```
## IterableDataset
An [`IterableDataset`] is loaded when you set the `streaming` parameter to `True` in [`~datasets.load_dataset`]:
```py
>>> from datasets import load_dataset
>>> iterable_dataset = load_dataset("food101", split="train", streaming=True)
>>> for example in iterable_dataset:
... print(example)
... break
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F5C520>, 'label': 6}
```
An [`IterableDataset`] progressively iterates over a dataset one example at a time, so you don't have to wait for the whole dataset to download before you can use it. As you can imagine, this is quite useful for large datasets you want to use immediately!
However, this means an [`IterableDataset`]'s behavior is different from a regular [`Dataset`]. You don't get random access to examples in an [`IterableDataset`]. Instead, you should iterate over its elements, for example, by calling `next(iter())` or with a `for` loop to return the next item from the [`IterableDataset`]:
```py
>>> next(iter(iterable_dataset))
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F59B50>,
'label': 6}
>>> for example in iterable_dataset:
... print(example)
... break
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DE82B0>, 'label': 6}
```
You can return a subset of the dataset with a specific number of examples in it with [`IterableDataset.take`]:
```py
# Get first three examples
>>> list(iterable_dataset.take(3))
[{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DEE9D0>,
'label': 6},
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F7479DE8190>,
'label': 6},
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x383 at 0x7F7479DE8310>,
'label': 6}]
```
But unlike [slicing](access/#slicing), [`IterableDataset.take`] creates a new [`IterableDataset`].
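If you also want to hold out some streamed examples, a common pattern (sketched below with an arbitrary size of 1000) is to pair [`IterableDataset.take`] with [`IterableDataset.skip`], both of which return new iterable datasets:
```py
>>> # Carve a small validation set out of the stream; the remaining examples go to training
>>> validation_dataset = iterable_dataset.take(1000)
>>> train_dataset = iterable_dataset.skip(1000)
```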
## Next steps
Interested in learning more about the differences between these two types of datasets? Learn more about them in the [Differences between `Dataset` and `IterableDataset`](about_mapstyle_vs_iterable) conceptual guide.
To get more hands-on with these datasets types, check out the [Process](process) guide to learn how to preprocess a [`Dataset`] or the [Stream](stream) guide to learn how to preprocess an [`IterableDataset`]. | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/image_load.mdx | # Load image data
Image datasets are loaded from the `image` column, which contains a PIL object.
<Tip>
To work with image datasets, you need to have the `vision` dependency installed. Check out the [installation](./installation#vision) guide to learn how to install it.
</Tip>
When you load an image dataset and call the `image` column, the [`Image`] feature automatically decodes the PIL object into an image:
```py
>>> from datasets import load_dataset, Image
>>> dataset = load_dataset("beans", split="train")
>>> dataset[0]["image"]
```
<Tip warning={true}>
Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding and resampling all the image objects in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset.
</Tip>
For a guide on how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>.
## Local files
You can load a dataset from the image path. Use the [`~Dataset.cast_column`] function to accept a column of image file paths, and decode it into a PIL image with the [`Image`] feature:
```py
>>> from datasets import Dataset, load_dataset, Image
>>> dataset = Dataset.from_dict({"image": ["path/to/image_1", "path/to/image_2", ..., "path/to/image_n"]}).cast_column("image", Image())
>>> dataset[0]["image"]
<PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160>
```
If you only want to load the underlying path to the image dataset without decoding the image object, set `decode=False` in the [`Image`] feature:
```py
>>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False))
>>> dataset[0]["image"]
{'bytes': None,
'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/bean_rust/bean_rust_train.29.jpg'}
```
## ImageFolder
You can also load a dataset with an `ImageFolder` dataset builder which does not require writing a custom dataloader. This makes `ImageFolder` ideal for quickly creating and loading image datasets with several thousand images for different vision tasks. Your image dataset structure should look like this:
```
folder/train/dog/golden_retriever.png
folder/train/dog/german_shepherd.png
folder/train/dog/chihuahua.png
folder/train/cat/maine_coon.png
folder/train/cat/bengal.png
folder/train/cat/birman.png
```
Load your dataset by specifying `imagefolder` and the directory of your dataset in `data_dir`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder")
>>> dataset["train"][0]
{"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160>, "label": 0}
>>> dataset["train"][-1]
{"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E8DAD30>, "label": 1}
```
Load remote datasets from their URLs with the `data_files` parameter:
```py
>>> dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip", split="train")
```
Some datasets have a metadata file (`metadata.csv`/`metadata.jsonl`) associated with it, containing other information about the data like bounding boxes, text captions, and labels. The metadata is automatically loaded when you call [`load_dataset`] and specify `imagefolder`.
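As an illustration (the file names and captions below are made up), a `metadata.csv` stored next to your images could link each file to a caption:
```text
file_name,caption
first_image.jpg,a bean leaf with no visible disease
second_image.jpg,a bean leaf showing angular leaf spot
```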
To ignore the information in the metadata file entirely, set `drop_metadata=True` in [`load_dataset`]. If you instead want `ImageFolder` to also infer a label column from the directory names, set `drop_labels=False`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", drop_labels=False)
```
<Tip>
For more information about creating your own `ImageFolder` dataset, take a look at the [Create an image dataset](./image_dataset) guide.
</Tip> | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/about_mapstyle_vs_iterable.mdx | # Differences between Dataset and IterableDataset
There are two types of dataset objects, a [`Dataset`] and an [`IterableDataset`].
Whichever type of dataset you choose to use or create depends on the size of the dataset.
In general, an [`IterableDataset`] is ideal for big datasets (think hundreds of GBs!) due to its lazy behavior and speed advantages, while a [`Dataset`] is great for everything else.
This page will compare the differences between a [`Dataset`] and an [`IterableDataset`] to help you pick the right dataset object for you.
## Downloading and streaming
When you have a regular [`Dataset`], you can access it using `my_dataset[0]`. This provides random access to the rows.
Such datasets are also called "map-style" datasets.
For example you can download ImageNet-1k like this and access any row:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", split="train") # downloads the full dataset
print(imagenet[0])
```
But one caveat is that you must have the entire dataset stored on your disk or in memory, which blocks you from accessing datasets bigger than the disk.
Because it can become inconvenient for big datasets, there exists another type of dataset, the [`IterableDataset`].
When you have an `IterableDataset`, you can access it using a `for` loop to load the data progressively as you iterate over the dataset.
This way, only a small fraction of examples is loaded in memory, and you don't write anything on disk.
For example, you can stream the ImageNet-1k dataset without downloading it on disk:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", split="train", streaming=True) # will start loading the data when iterated over
for example in imagenet:
print(example)
break
```
Streaming can read online data without writing any file to disk.
For example, you can stream datasets made out of multiple shards, each of which is hundreds of gigabytes like [C4](https://huggingface.co/datasets/c4), [OSCAR](https://huggingface.co/datasets/oscar) or [LAION-2B](https://huggingface.co/datasets/laion/laion2B-en).
Learn more about how to stream a dataset in the [Dataset Streaming Guide](./stream).
This is not the only difference though, because the "lazy" behavior of an `IterableDataset` is also present when it comes to dataset creation and processing.
## Creating map-style datasets and iterable datasets
You can create a [`Dataset`] using lists or dictionaries, and the data is entirely converted to Arrow so you can easily access any row:
```python
from datasets import Dataset
my_dataset = Dataset.from_dict({"col_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})
print(my_dataset[0])
```
To create an `IterableDataset` on the other hand, you must provide a "lazy" way to load the data.
In Python, we generally use generator functions. These functions `yield` one example at a time, which means you can't access a row by slicing it like a regular `Dataset`:
```python
from datasets import IterableDataset
def my_generator(n):
for i in range(n):
yield {"col_1": i}
my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs={"n": 10})
for example in my_iterable_dataset:
print(example)
break
```
## Loading local files entirely and progressively
It is possible to convert local or remote data files to an Arrow [`Dataset`] using [`load_dataset`]:
```python
data_files = {"train": ["path/to/data.csv"]}
my_dataset = load_dataset("csv", data_files=data_files, split="train")
print(my_dataset[0])
```
However, this requires a conversion step from CSV to Arrow format, which takes time and disk space if your dataset is big.
To save disk space and skip the conversion step, you can define an `IterableDataset` by streaming from the local files directly.
This way, the data is read progressively from the local files as you iterate over the dataset:
```python
data_files = {"train": ["path/to/data.csv"]}
my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True)
for example in my_iterable_dataset: # this reads the CSV file progressively as you iterate over the dataset
print(example)
break
```
Many file formats are supported, like CSV, JSONL, and Parquet, as well as image and audio files.
You can find more information in the corresponding guides for loading [tabular](./tabular_load), [text](./nlp_load), [vision](./image_load), and [audio](./audio_load) datasets.
## Eager data processing and lazy data processing
When you process a [`Dataset`] object using [`Dataset.map`], the entire dataset is processed immediately and returned.
This is similar to how `pandas` works for example.
```python
my_dataset = my_dataset.map(process_fn) # process_fn is applied on all the examples of the dataset
print(my_dataset[0])
```
On the other hand, due to the "lazy" nature of an `IterableDataset`, calling [`IterableDataset.map`] does not apply your `map` function over the full dataset.
Instead, your `map` function is applied on-the-fly.
Because of that, you can chain multiple processing steps and they will all run at once when you start iterating over the dataset:
```python
my_iterable_dataset = my_iterable_dataset.map(process_fn_1)
my_iterable_dataset = my_iterable_dataset.filter(filter_fn)
my_iterable_dataset = my_iterable_dataset.map(process_fn_2)
# process_fn_1, filter_fn and process_fn_2 are applied on-the-fly when iterating over the dataset
for example in my_iterable_dataset:
print(example)
break
```
## Exact and fast approximate shuffling
When you shuffle a [`Dataset`] using [`Dataset.shuffle`], you apply an exact shuffling of the dataset.
It works by taking a list of indices `[0, 1, 2, ... len(my_dataset) - 1]` and shuffling this list.
Then, accessing `my_dataset[0]` returns the row and index defined by the first element of the indices mapping that has been shuffled:
```python
my_dataset = my_dataset.shuffle(seed=42)
print(my_dataset[0])
```
Since we don't have random access to the rows in the case of an `IterableDataset`, we can't use a shuffled list of indices and access a row at an arbitrary position.
This prevents the use of exact shuffling.
Instead, a fast approximate shuffling is used in [`IterableDataset.shuffle`].
It uses a shuffle buffer to sample random examples iteratively from the dataset.
Since the dataset is still read iteratively, it provides excellent speed performance:
```python
my_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in my_iterable_dataset:
print(example)
break
```
But using a shuffle buffer is not enough to provide a satisfactory shuffling for machine learning model training. So [`IterableDataset.shuffle`] also shuffles the dataset shards if your dataset is made of multiple files or sources:
```python
# Stream from the internet
my_iterable_dataset = load_dataset("c4", "en", split="train", streaming=True)
my_iterable_dataset.n_shards # 1024
# Stream from local files
data_files = {"train": [f"path/to/data_{i}.csv" for i in range(1024)]}
my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True)
my_iterable_dataset.n_shards # 1024
# From a generator function
def my_generator(n, sources):
for source in sources:
for example_id_for_current_source in range(n):
yield {"example_id": f"{source}_{example_id_for_current_source}"}
gen_kwargs = {"n": 10, "sources": [f"path/to/data_{i}" for i in range(1024)]}
my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs=gen_kwargs)
my_iterable_dataset.n_shards # 1024
```
## Speed differences
Regular [`Dataset`] objects are based on Arrow which provides fast random access to the rows.
Thanks to memory mapping and the fact that Arrow is an in-memory format, reading data from disk doesn't do expensive system calls and deserialization.
It provides even faster data loading when iterating using a `for` loop by iterating on contiguous Arrow record batches.
However as soon as your [`Dataset`] has an indices mapping (via [`Dataset.shuffle`] for example), the speed can become 10x slower.
This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
This may take a lot of time depending on the size of your dataset though:
```python
my_dataset[0] # fast
my_dataset = my_dataset.shuffle(seed=42)
my_dataset[0] # up to 10x slower
my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
my_dataset[0] # fast again
```
In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal.
You can also reshuffle the dataset easily:
```python
for example in my_iterable_dataset:  # fast
    pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in shuffled_iterable_dataset:  # as fast as before
    pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=1337, buffer_size=100)  # reshuffling using another seed is instantaneous
for example in shuffled_iterable_dataset:  # still as fast as before
    pass
```
If you're using your dataset on multiple epochs, the effective seed to shuffle the shards order in the shuffle buffer is `seed + epoch`.
It makes it easy to reshuffle a dataset between epochs:
```python
for epoch in range(n_epochs):
my_iterable_dataset.set_epoch(epoch)
for example in my_iterable_dataset: # fast + reshuffled at each epoch using `effective_seed = seed + epoch`
pass
```
## Switch from map-style to iterable
If you want to benefit from the "lazy" behavior of an [`IterableDataset`] or their speed advantages, you can switch your map-style [`Dataset`] to an [`IterableDataset`]:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset()
```
If you want to shuffle your dataset or [use it with a PyTorch DataLoader](./use_with_pytorch#stream-data), we recommend generating a sharded [`IterableDataset`]:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=1024)
my_iterable_dataset.n_shards # 1024
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/use_dataset.mdx | # Preprocess
In addition to loading datasets, π€ Datasets' other main goal is to offer a diverse set of preprocessing functions to get a dataset into an appropriate format for training with your machine learning framework.
There are many possible ways to preprocess a dataset, and it all depends on your specific dataset. Sometimes you may need to rename a column, and other times you might need to unflatten nested fields. π€ Datasets provides a way to do most of these things. But in nearly all preprocessing cases, depending on your dataset modality, you'll need to:
- Tokenize a text dataset.
- Resample an audio dataset.
- Apply transforms to an image dataset.
The last preprocessing step is usually setting your dataset format to be compatible with your machine learning framework's expected input format.
In this tutorial, you'll also need to install the π€ Transformers library:
```bash
pip install transformers
```
Grab a dataset of your choice and follow along!
## Tokenize text
Models cannot process raw text, so you'll need to convert the text into numbers. Tokenization provides a way to do this by dividing text into individual words called *tokens*. Tokens are finally converted to numbers.
<Tip>
Check out the [Tokenizers](https://huggingface.co/course/chapter2/4?fw=pt) section in Chapter 2 of the Hugging Face course to learn more about tokenization and different tokenization algorithms.
</Tip>
**1**. Start by loading the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset and the tokenizer corresponding to a pretrained [BERT](https://huggingface.co/bert-base-uncased) model. Using the same tokenizer as the pretrained model is important because you want to make sure the text is split in the same way.
```py
>>> from transformers import AutoTokenizer
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> dataset = load_dataset("rotten_tomatoes", split="train")
```
**2**. Call your tokenizer on the first row of `text` in the dataset:
```py
>>> tokenizer(dataset[0]["text"])
{'input_ids': [101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432, 112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119, 112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190, 170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117, 179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137, 188, 1566, 7912, 14516, 6997, 119, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
The tokenizer returns a dictionary with three items:
- `input_ids`: the numbers representing the tokens in the text.
- `token_type_ids`: indicates which sequence a token belongs to if there is more than one sequence.
- `attention_mask`: indicates whether a token should be masked or not.
These values are actually the model inputs.
**3**. The fastest way to tokenize your entire dataset is to use the [`~Dataset.map`] function. This function speeds up tokenization by applying the tokenizer to batches of examples instead of individual examples. Set the `batched` parameter to `True`:
```py
>>> def tokenization(example):
... return tokenizer(example["text"])
>>> dataset = dataset.map(tokenization, batched=True)
```
**4**. Set the format of your dataset to be compatible with your machine learning framework:
<frameworkcontent>
<pt>
Use the [`~Dataset.set_format`] function to set the dataset format to be compatible with PyTorch:
```py
>>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "labels"])
>>> dataset.format['type']
'torch'
```
</pt>
<tf>
Use the [`~Dataset.to_tf_dataset`] function to set the dataset format to be compatible with TensorFlow. You'll also need to import a [data collator](https://huggingface.co/docs/transformers/main_classes/data_collator#transformers.DataCollatorWithPadding) from π€ Transformers to combine the varying sequence lengths into a single batch of equal lengths:
```py
>>> from transformers import DataCollatorWithPadding
>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")
>>> tf_dataset = dataset.to_tf_dataset(
... columns=["input_ids", "token_type_ids", "attention_mask"],
... label_cols=["labels"],
... batch_size=2,
... collate_fn=data_collator,
... shuffle=True
... )
```
</tf>
</frameworkcontent>
**5**. The dataset is now ready for training with your machine learning framework!
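If you are following the PyTorch path, a quick sanity check is to wrap the formatted dataset in a `DataLoader` together with a padding collator. This is a minimal sketch, assuming PyTorch is installed and reusing the tokenizer loaded above:
```py
>>> from torch.utils.data import DataLoader
>>> from transformers import DataCollatorWithPadding
>>> # Pad each batch to the length of its longest sequence (one possible collation strategy)
>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
>>> dataloader = DataLoader(dataset, batch_size=8, collate_fn=data_collator)
>>> batch = next(iter(dataloader))
>>> batch["input_ids"].shape  # (8, longest sequence in this batch)
```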
## Resample audio signals
Audio inputs like text datasets need to be divided into discrete data points. This is known as *sampling*; the sampling rate tells you how much of the speech signal is captured per second. It is important to make sure the sampling rate of your dataset matches the sampling rate of the data used to pretrain the model you're using. If the sampling rates are different, the pretrained model may perform poorly on your dataset because it doesn't recognize the differences in the sampling rate.
**1**. Start by loading the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset, the [`Audio`] feature, and the feature extractor corresponding to a pretrained [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base-960h) model:
```py
>>> from transformers import AutoFeatureExtractor
>>> from datasets import load_dataset, Audio
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
```
**2**. Index into the first row of the dataset. When you call the `audio` column of the dataset, it is automatically decoded and resampled:
```py
>>> dataset[0]["audio"]
{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414,
0. , 0. ], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 8000}
```
**3**. Reading a dataset card is incredibly useful and can give you a lot of information about the dataset. A quick look at the MInDS-14 dataset card tells you the sampling rate is 8kHz. Likewise, you can get many details about a model from its model card. The Wav2Vec2 model card says it was pretrained on 16kHz sampled speech audio. This means you'll need to upsample the MInDS-14 dataset to match the sampling rate of the model.
Use the [`~Dataset.cast_column`] function and set the `sampling_rate` parameter in the [`Audio`] feature to upsample the audio signal. When you call the `audio` column now, it is decoded and resampled to 16kHz:
```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
>>> dataset[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
**4**. Use the [`~Dataset.map`] function to resample the entire dataset to 16kHz. This function speeds up resampling by applying the feature extractor to batches of examples instead of individual examples. Set the `batched` parameter to `True`:
```py
>>> def preprocess_function(examples):
... audio_arrays = [x["array"] for x in examples["audio"]]
... inputs = feature_extractor(
... audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True
... )
... return inputs
>>> dataset = dataset.map(preprocess_function, batched=True)
```
**5**. The dataset is now ready for training with your machine learning framework!
## Apply data augmentations
The most common preprocessing you'll do with image datasets is *data augmentation*, a process that introduces random variations to an image without changing the meaning of the data. This can mean changing the color properties of an image or randomly cropping an image. You are free to use any data augmentation library you like, and π€ Datasets will help you apply your data augmentations to your dataset.
**1**. Start by loading the [Beans](https://huggingface.co/datasets/beans) dataset, the `Image` feature, and the feature extractor corresponding to a pretrained [ViT](https://huggingface.co/google/vit-base-patch16-224-in21k) model:
```py
>>> from transformers import AutoFeatureExtractor
>>> from datasets import load_dataset, Image
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
>>> dataset = load_dataset("beans", split="train")
```
**2**. Index into the first row of the dataset. When you call the `image` column of the dataset, the underlying PIL object is automatically decoded into an image.
```py
>>> dataset[0]["image"]
```
**3**. Now, you can apply some transforms to the image. Feel free to take a look at the [various transforms available](https://pytorch.org/vision/stable/auto_examples/plot_transforms.html#sphx-glr-auto-examples-plot-transforms-py) in torchvision and choose one you'd like to experiment with. This example applies a transform that randomly rotates the image:
```py
>>> from torchvision.transforms import RandomRotation
>>> rotate = RandomRotation(degrees=(0, 90))
>>> def transforms(examples):
... examples["pixel_values"] = [rotate(image.convert("RGB")) for image in examples["image"]]
... return examples
```
**4**. Use the [`~Dataset.set_transform`] function to apply the transform on-the-fly. When you index into the image `pixel_values`, the transform is applied, and your image gets rotated.
```py
>>> dataset.set_transform(transforms)
>>> dataset[0]["pixel_values"]
```
**5**. The dataset is now ready for training with your machine learning framework! | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/semantic_segmentation.mdx | # Semantic segmentation
Semantic segmentation datasets are used to train a model to classify every pixel in an image. There are
a wide variety of applications enabled by these datasets such as background removal from images, stylizing
images, or scene understanding for autonomous driving. This guide will show you how to apply transformations
to an image segmentation dataset.
Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed:
```bash
pip install -U albumentations opencv-python
```
[Albumentations](https://albumentations.ai/) is a Python library for performing data augmentation
for computer vision. It supports various computer vision tasks such as image classification, object
detection, segmentation, and keypoint estimation.
This guide uses the [Scene Parsing](https://huggingface.co/datasets/scene_parse_150) dataset for segmenting
and parsing an image into different image regions associated with semantic categories, such as sky, road, person, and bed.
Load the `train` split of the dataset and take a look at an example:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("scene_parse_150", split="train")
>>> index = 10
>>> dataset[index]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=683x512 at 0x7FB37B0EC810>,
'annotation': <PIL.PngImagePlugin.PngImageFile image mode=L size=683x512 at 0x7FB37B0EC9D0>,
'scene_category': 927}
```
The dataset has three fields:
* `image`: a PIL image object.
* `annotation`: segmentation mask of the image.
* `scene_category`: the label or scene category of the image (like "kitchen" or "office").
Next, check out an image with:
```py
>>> dataset[index]["image"]
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/image_seg.png">
</div>
Similarly, you can check out the respective segmentation mask:
```py
>>> dataset[index]["annotation"]
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/seg_mask.png">
</div>
We can also add a [color palette](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51) on the
segmentation mask and overlay it on top of the original image to visualize the dataset:
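The linked colormap contains 151 RGB triplets, which is too long to reproduce here. The sketch below is a stand-in that assigns an arbitrary but deterministic color to every label; swap in the official `create_ade20k_label_colormap` from the link above for the canonical colors.
```py
>>> import numpy as np
>>> def create_ade20k_label_colormap(num_labels=151, seed=0):
...     # Placeholder palette: one RGB color per ADE20K label.
...     # Replace with the official colormap for the canonical colors.
...     rng = np.random.default_rng(seed)
...     return rng.integers(0, 256, size=(num_labels, 3), dtype=np.uint8)
```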
After defining the color palette, you should be ready to visualize some overlays.
```py
>>> import matplotlib.pyplot as plt
>>> def visualize_seg_mask(image: np.ndarray, mask: np.ndarray):
... color_seg = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
... palette = np.array(create_ade20k_label_colormap())
... for label, color in enumerate(palette):
... color_seg[mask == label, :] = color
... color_seg = color_seg[..., ::-1] # convert to BGR
... img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map
... img = img.astype(np.uint8)
... plt.figure(figsize=(15, 10))
... plt.imshow(img)
... plt.axis("off")
... plt.show()
>>> visualize_seg_mask(
... np.array(dataset[index]["image"]),
... np.array(dataset[index]["annotation"])
... )
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/seg_overlay.png">
</div>
Now apply some augmentations with `albumentations`. You'll first resize the image and adjust its brightness.
```py
>>> import albumentations
>>> transform = albumentations.Compose(
... [
... albumentations.Resize(256, 256),
... albumentations.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.5),
... ]
... )
```
Create a function to apply the transformation to the images:
```py
>>> def transforms(examples):
... transformed_images, transformed_masks = [], []
...
... for image, seg_mask in zip(examples["image"], examples["annotation"]):
... image, seg_mask = np.array(image), np.array(seg_mask)
... transformed = transform(image=image, mask=seg_mask)
... transformed_images.append(transformed["image"])
... transformed_masks.append(transformed["mask"])
...
... examples["pixel_values"] = transformed_images
... examples["label"] = transformed_masks
... return examples
```
Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space:
```py
>>> dataset.set_transform(transforms)
```
You can verify the transformation worked by indexing into the `pixel_values` and `label` of an example:
```py
>>> image = np.array(dataset[index]["pixel_values"])
>>> mask = np.array(dataset[index]["label"])
>>> visualize_seg_mask(image, mask)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/albumentations_seg.png">
</div>
In this guide, you have used `albumentations` for augmenting the dataset. It's also possible to use `torchvision` to apply some similar transforms.
```py
>>> from torchvision.transforms import Resize, ColorJitter, Compose
>>> transformation_chain = Compose([
... Resize((256, 256)),
... ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
... ])
>>> resize = Resize((256, 256))
>>> def train_transforms(example_batch):
... example_batch["pixel_values"] = [transformation_chain(x) for x in example_batch["image"]]
... example_batch["label"] = [resize(x) for x in example_batch["annotation"]]
... return example_batch
>>> dataset.set_transform(train_transforms)
>>> image = np.array(dataset[index]["pixel_values"])
>>> mask = np.array(dataset[index]["label"])
>>> visualize_seg_mask(image, mask)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/torchvision_seg.png">
</div>
<Tip>
Now that you know how to process a dataset for semantic segmentation, learn
[how to train a semantic segmentation model](https://huggingface.co/docs/transformers/tasks/semantic_segmentation)
and use it for inference.
</Tip> | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/audio_load.mdx | # Load audio data
You can load an audio dataset using the [`Audio`] feature that automatically decodes and resamples the audio files when you access the examples.
Audio decoding is based on the [`soundfile`](https://github.com/bastibe/python-soundfile) python package, which uses the [`libsndfile`](https://github.com/libsndfile/libsndfile) C library under the hood.
## Installation
To work with audio datasets, you need to have the `audio` dependencies installed.
Check out the [installation](./installation#audio) guide to learn how to install it.
## Local files
You can load your own dataset using the paths to your audio files. Use the [`~Dataset.cast_column`] function to take a column of audio file paths, and cast it to the [`Audio`] feature:
```py
>>> from datasets import Dataset, Audio
>>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"]}).cast_column("audio", Audio())
>>> audio_dataset[0]["audio"]
{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414,
0. , 0. ], dtype=float32),
'path': 'path/to/audio_1',
'sampling_rate': 16000}
```
## AudioFolder
You can also load a dataset with an `AudioFolder` dataset builder. It does not require writing a custom dataloader, making it useful for quickly creating and loading audio datasets with several thousand audio files.
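For example, assuming a hypothetical local folder laid out as `folder/train/<label_name>/<audio_file>`, `AudioFolder` infers the labels from the directory names:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder")
>>> dataset["train"][0]["audio"]   # decoded audio array, path and sampling rate
>>> dataset["train"][0]["label"]   # class label inferred from the directory name
```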
## AudioFolder with metadata
To link your audio files with metadata information, make sure your dataset has a `metadata.csv` file. Your dataset structure might look like:
```
folder/train/metadata.csv
folder/train/first_audio_file.mp3
folder/train/second_audio_file.mp3
folder/train/third_audio_file.mp3
```
Your `metadata.csv` file must have a `file_name` column which links audio files with their metadata. An example `metadata.csv` file might look like:
```text
file_name,transcription
first_audio_file.mp3,znowu się duch z ciałem zrośnie w młodocianej wstaniesz wiosnie i możesz skutkiem tych leków umierać wstawać wiek wieków dalej tam były przestrogi jak siekać głowę jak nogi
second_audio_file.mp3,już u źwierzyńca podwojów król zasiada przy nim książęta i panowie rada a gdzie wzniosły krążył ganek rycerze obok kochanek król skinął palcem zaczęto igrzysko
third_audio_file.mp3,pewnie kędyś w obłędzie ubite minęły szlaki zaczekajmy dzień jaki poślemy szukać wszędzie dziś jutro pewnie będzie posłali wszędzie sługi czekali dzień i drugi gdy nic nie doczekali z płaczem chcą jechać dali
`AudioFolder` will load audio data and create a `transcription` column containing texts from `metadata.csv`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder")
>>> # OR by specifying the list of files
>>> dataset = load_dataset("audiofolder", data_files=["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"])
```
You can load remote datasets from their URLs with the `data_files` parameter:
```py
>>> dataset = load_dataset("audiofolder", data_files=["https://foo.bar/audio_1", "https://foo.bar/audio_2", ..., "https://foo.bar/audio_n"])
>>> # for example, pass SpeechCommands archive:
>>> dataset = load_dataset("audiofolder", data_files="https://s3.amazonaws.com/datasets.huggingface.co/SpeechCommands/v0.01/v0.01_test.tar.gz")
```
Metadata can also be specified as JSON Lines, in which case use `metadata.jsonl` as the name of the metadata file. This format is helpful in scenarios when one of the columns is complex, e.g. a list of floats, to avoid parsing errors or reading the complex values as strings.
To ignore the information in the metadata file, set `drop_metadata=True` in [`load_dataset`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder", drop_metadata=True)
```
If you don't have a metadata file, `AudioFolder` automatically infers the label name from the directory name.
If you want to drop automatically created labels, set `drop_labels=True`.
In this case, your dataset will only contain an audio column:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder_without_metadata", drop_labels=True)
```
<Tip>
For more information about creating your own `AudioFolder` dataset, take a look at the [Create an audio dataset](./audio_dataset) guide.
</Tip>
For a guide on how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>.
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/index.mdx | # Datasets
<img class="float-left !m-0 !border-0 !dark:border-0 !shadow-none !max-w-lg w-[150px]" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/datasets_logo.png"/>
π€ Datasets is a library for easily accessing and sharing datasets for Audio, Computer Vision, and Natural Language Processing (NLP) tasks.
Load a dataset in a single line of code, and use our powerful data processing methods to quickly get your dataset ready for training in a deep learning model. Backed by the Apache Arrow format, process large datasets with zero-copy reads without any memory constraints for optimal speed and efficiency. We also feature a deep integration with the [Hugging Face Hub](https://huggingface.co/datasets), allowing you to easily load and share a dataset with the wider machine learning community.
Find your dataset today on the [Hugging Face Hub](https://huggingface.co/datasets), and take an in-depth look inside of it with the live viewer.
<div class="mt-10">
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorial"
><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
<p class="text-gray-700">Learn the basics and become familiar with loading, accessing, and processing a dataset. Start here if you are using π€ Datasets for the first time!</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./how_to"
><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
<p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use π€ Datasets to solve real-world problems.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./about_arrow"
><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
<p class="text-gray-700">High-level explanations for building a better understanding about important topics such as the underlying data format, the cache, and how datasets are generated.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/main_classes"
><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
<p class="text-gray-700">Technical descriptions of how π€ Datasets classes and methods work.</p>
</a>
</div>
</div>
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/share.mdx | # Share a dataset using the CLI
At Hugging Face, we are on a mission to democratize good Machine Learning and we believe in the value of open source. That's why we designed π€ Datasets so that anyone can share a dataset with the greater ML community. There are currently thousands of datasets in over 100 languages in the Hugging Face Hub, and the Hugging Face team always welcomes new contributions!
Dataset repositories offer features such as:
- Free dataset hosting
- Dataset versioning
- Commit history and diffs
- Metadata for discoverability
- Dataset cards for documentation, licensing, limitations, etc.
This guide will show you how to share a dataset that can be easily accessed by anyone.
<a id='upload_dataset_repo'></a>
## Add a dataset
You can share your dataset with the community with a dataset repository on the Hugging Face Hub.
It can also be a private dataset if you want to control who has access to it.
In a dataset repository, you can host all your data files and [configure your dataset](./repository_structure#define-your-splits-in-yaml) to define which file goes to which split.
The following file formats are supported: CSV, TSV, JSON, JSON Lines, text, Parquet, Arrow, and SQLite.
Many kinds of compressed files are also supported, such as GZ, BZ2, LZ4, LZMA or ZSTD.
For example, your dataset can be made of `.json.gz` files.
On the other hand, if your dataset is not in a supported format or if you want more control over how your dataset is loaded, you can write your own dataset script.
When loading a dataset from the Hub, all the files in the supported formats are loaded, following the [repository structure](./repository_structure).
However if there's a dataset script, it is downloaded and executed to download and prepare the dataset instead.
For more information on how to load a dataset from the Hub, take a look at the [load a dataset from the Hub](./load_hub) tutorial.
### Create the repository
Sharing a community dataset will require you to create an account on [hf.co](https://huggingface.co/join) if you don't have one yet.
You can directly create a [new dataset repository](https://huggingface.co/login?next=%2Fnew-dataset) from your account on the Hugging Face Hub, but this guide will show you how to upload a dataset from the terminal.
1. Make sure you are in the virtual environment where you installed Datasets, and run the following command:
```
huggingface-cli login
```
2. Login using your Hugging Face Hub credentials, and create a new dataset repository:
```
huggingface-cli repo create your_dataset_name --type dataset
```
Add the `--organization` flag to create a repository under a specific organization:
```
huggingface-cli repo create your_dataset_name --type dataset --organization your-org-name
```
### Clone the repository
3. Install [Git LFS](https://git-lfs.github.com/) and clone your repository:
```
# Make sure you have git-lfs installed
# (https://git-lfs.github.com/)
git lfs install
git clone https://huggingface.co/datasets/namespace/your_dataset_name
```
Here the `namespace` is either your username or your organization name.
### Prepare your files
4. Now is a good time to check your directory to ensure the only files you're uploading are:
- The data files of the dataset
- The dataset card `README.md`
- (optional) `your_dataset_name.py`, your dataset loading script; this is only needed if your data files are not already in a supported format (csv/jsonl/json/parquet/txt). To create a dataset script, see the [dataset script](dataset_script) page.
### Upload your files
You can directly upload your files to your repository on the Hugging Face Hub, but this guide will show you how to upload the files from the terminal.
5. It is important to add the large data files first with `git lfs track` or else you will encounter an error later when you push your files:
```
cp /somewhere/data/*.json .
git lfs track *.json
git add .gitattributes
git add *.json
git commit -m "add json files"
```
6. (Optional) Add the dataset loading script:
```
cp /somewhere/data/load_script.py .
git add --all
```
7. Verify the files have been correctly staged. Then you can commit and push your files:
```
git status
git commit -m "First version of the your_dataset_name dataset."
git push
```
Congratulations, your dataset has now been uploaded to the Hugging Face Hub where anyone can load it in a single line of code! π₯³
```
dataset = load_dataset("namespace/your_dataset_name")
```
Finally, don't forget to enrich the dataset card to document your dataset and make it discoverable! Check out the [Create a dataset card](dataset_card) guide to learn more.
### Ask for help and reviews
If you need help with a dataset script, feel free to check the [datasets forum](https://discuss.huggingface.co/c/datasets/10): it's possible that someone had similar issues and shared how they managed to fix them.
Then if your script is ready and if you wish your dataset script to be reviewed by the Hugging Face team, you can open a discussion in the Community tab of your dataset with this message:
```
# Dataset review request for <Dataset name>
## Description
<brief description of the dataset>
## Files to review
- file1
- file2
- ...
cc @lhoestq @polinaeterna @mariosasko @albertvillanova
```
Members of the Hugging Face team will be happy to review your dataset script and give you advice.
## Datasets on GitHub (legacy)
Datasets used to be hosted on our GitHub repository, but all datasets have now been migrated to the Hugging Face Hub.
The legacy GitHub datasets were added originally on our GitHub repository and therefore don't have a namespace on the Hub: "squad", "glue", etc. unlike the other datasets that are named "username/dataset_name" or "org/dataset_name".
<Tip>
The distinction between a Hub dataset within or without a namespace only comes from the legacy sharing workflow. It does not involve any ranking, decisioning, or opinion regarding the contents of the dataset itself.
</Tip>
Those datasets are now maintained on the Hub: if you think a fix is needed, please use their "Community" tab to open a discussion or create a Pull Request.
The code of these datasets is reviewed by the Hugging Face team.
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/use_with_tensorflow.mdx | # Using Datasets with TensorFlow
This document is a quick introduction to using `datasets` with TensorFlow, with a particular focus on how to get
`tf.Tensor` objects out of our datasets, and how to stream data from Hugging Face `Dataset` objects to Keras methods
like `model.fit()`.
## Dataset format
By default, datasets return regular Python objects: integers, floats, strings, lists, etc.
To get TensorFlow tensors instead, you can set the format of the dataset to `tf`:
```py
>>> from datasets import Dataset
>>> data = [[1, 2],[3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("tf")
>>> ds[0]
{'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([1, 2])>}
>>> ds[:2]
{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
array([[1, 2],
[3, 4]])>}
```
<Tip>
A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to TensorFlow tensors.
</Tip>
This can be useful for converting your dataset to a dict of `Tensor` objects, or for writing a generator to load TF
samples from it. If you wish to convert the entire dataset to `Tensor`, simply query the full dataset:
```py
>>> ds[:]
{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
array([[1, 2],
[3, 4]])>}
```
## N-dimensional arrays
If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists.
In particular, a TensorFlow formatted dataset outputs a `RaggedTensor` instead of a single tensor:
```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("tf")
>>> ds[0]
{'data': <tf.RaggedTensor [[1, 2], [3, 4]]>}
```
To get a single tensor, you must explicitly use the [`Array`] feature type and specify the shape of your tensors:
```py
>>> from datasets import Dataset, Features, Array2D
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')})
>>> ds = Dataset.from_dict({"data": data}, features=features)
>>> ds = ds.with_format("tf")
>>> ds[0]
{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
array([[1, 2],
[3, 4]])>}
>>> ds[:2]
{'data': <tf.Tensor: shape=(2, 2, 2), dtype=int64, numpy=
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])>}
```
## Other feature types
[`ClassLabel`] data are properly converted to tensors:
```py
>>> from datasets import Dataset, Features, ClassLabel
>>> labels = [0, 0, 1]
>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
>>> ds = Dataset.from_dict({"label": labels}, features=features)
>>> ds = ds.with_format("tf")
>>> ds[:3]
{'label': <tf.Tensor: shape=(3,), dtype=int64, numpy=array([0, 0, 1])>}
```
Strings and binary objects are also supported:
```py
>>> from datasets import Dataset, Features
>>> text = ["foo", "bar"]
>>> data = [0, 1]
>>> ds = Dataset.from_dict({"text": text, "data": data})
>>> ds = ds.with_format("tf")
>>> ds[:2]
{'text': <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'foo', b'bar'], dtype=object)>,
'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>}
```
You can also explicitly format certain columns and leave the other columns unformatted:
```py
>>> ds = ds.with_format("tf", columns=["data"], output_all_columns=True)
>>> ds[:2]
{'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>,
'text': ['foo', 'bar']}
```
Columns that are left unformatted are returned as regular Python objects, as shown for the `text` column above.
The [`Image`] and [`Audio`] feature types are also supported.
<Tip>
To use the [`Image`] feature type, you'll need to install the `vision` extra as
`pip install datasets[vision]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio, Image
>>> images = ["path/to/image.png"] * 10
>>> features = Features({"image": Image()})
>>> ds = Dataset.from_dict({"image": images}, features=features)
>>> ds = ds.with_format("tf")
>>> ds[0]
{'image': <tf.Tensor: shape=(512, 512, 4), dtype=uint8, numpy=
array([[[255, 215, 106, 255],
[255, 215, 106, 255],
...,
[255, 255, 255, 255],
[255, 255, 255, 255]]], dtype=uint8)>}
>>> ds[:2]
{'image': <tf.Tensor: shape=(2, 512, 512, 4), dtype=uint8, numpy=
array([[[[255, 215, 106, 255],
[255, 215, 106, 255],
...,
[255, 255, 255, 255],
[255, 255, 255, 255]]]], dtype=uint8)>}
```
<Tip>
To use the [`Audio`] feature type, you'll need to install the `audio` extra as
`pip install datasets[audio]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio, Image
>>> audio = ["path/to/audio.wav"] * 10
>>> features = Features({"audio": Audio()})
>>> ds = Dataset.from_dict({"audio": audio}, features=features)
>>> ds = ds.with_format("tf")
>>> ds[0]["audio"]["array"]
<tf.Tensor: shape=(202311,), dtype=float32, numpy=
array([ 6.1035156e-05, 1.5258789e-05, 1.6784668e-04, ...,
-1.5258789e-05, -1.5258789e-05, 1.5258789e-05], dtype=float32)>
>>> ds[0]["audio"]["sampling_rate"]
<tf.Tensor: shape=(), dtype=int32, numpy=44100>
```
## Data loading
Although you can load individual samples and batches just by indexing into your dataset, this won't work if you want
to use Keras methods like `fit()` and `predict()`. You could write a generator function that shuffles and loads batches
from your dataset and `fit()` on that, but that sounds like a lot of unnecessary work. Instead, if you want to stream
data from your dataset on-the-fly, we recommend converting your dataset to a `tf.data.Dataset` using the
`to_tf_dataset()` method.
The `tf.data.Dataset` class covers a wide range of use-cases - it is often created from Tensors in memory, or using a load function to read files on disk
or external storage. The dataset can be transformed arbitrarily with the `map()` method, or methods like `batch()`
and `shuffle()` can be used to create a dataset that's ready for training. These methods do not modify the stored data
in any way - instead, the methods build a data pipeline graph that will be executed when the dataset is iterated over,
usually during model training or inference. This is different from the `map()` method of Hugging Face `Dataset` objects,
which runs the map function immediately and saves the new or changed columns.
Since the entire data preprocessing pipeline can be compiled in a `tf.data.Dataset`, this approach allows for massively
parallel, asynchronous data loading and training. However, the requirement for graph compilation can be a limitation,
particularly for Hugging Face tokenizers, which are usually not (yet!) compilable as part of a TF graph. As a result,
we usually advise pre-processing the dataset as a Hugging Face dataset, where arbitrary Python functions can be
used, and then converting to `tf.data.Dataset` afterwards using `to_tf_dataset()` to get a batched dataset ready for
training. To see examples of this approach, please see the [examples](https://github.com/huggingface/transformers/tree/main/examples) or [notebooks](https://huggingface.co/docs/transformers/notebooks) for `transformers`.
### Using `to_tf_dataset()`
Using `to_tf_dataset()` is straightforward. Once your dataset is preprocessed and ready, simply call it like so:
```py
>>> from datasets import Dataset
>>> data = {"inputs": [[1, 2],[3, 4]], "labels": [0, 1]}
>>> ds = Dataset.from_dict(data)
>>> tf_ds = ds.to_tf_dataset(
columns=["inputs"],
label_cols=["labels"],
batch_size=2,
shuffle=True
)
```
The returned `tf_ds` object here is now fully ready to train on, and can be passed directly to `model.fit()`. Note
that you set the batch size when creating the dataset, and so you don't need to specify it when calling `fit()`:
```py
>>> model.fit(tf_ds, epochs=2)
```
For a full description of the arguments, please see the [`~Dataset.to_tf_dataset`] documentation. In many cases,
you will also need to add a `collate_fn` to your call. This is a function that takes multiple elements of the dataset
and combines them into a single batch. When all elements have the same length, the built-in default collator will
suffice, but for more complex tasks a custom collator may be necessary. In particular, many tasks have samples
with varying sequence lengths which will require a [data collator](https://huggingface.co/docs/transformers/main/en/main_classes/data_collator) that can pad batches correctly. You can see examples
of this in the `transformers` NLP [examples](https://github.com/huggingface/transformers/tree/main/examples) and
[notebooks](https://huggingface.co/docs/transformers/notebooks), where variable sequence lengths are very common.
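As a rough sketch of what this looks like for a text classification dataset with `text` and `label` columns (the checkpoint name below is only an illustration, not part of the toy example above):
```py
>>> from transformers import AutoTokenizer, DataCollatorWithPadding
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda examples: tokenizer(examples["text"], truncation=True), batched=True)
>>> collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")
>>> tf_ds = ds.to_tf_dataset(
...     columns=["input_ids", "attention_mask"],
...     label_cols=["labels"],  # DataCollatorWithPadding renames "label" to "labels"
...     batch_size=16,
...     shuffle=True,
...     collate_fn=collator,
... )
```
Each batch is then padded to the length of its own longest sample, rather than the longest sample in the entire dataset.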
If you find that loading with `to_tf_dataset` is slow, you can also use the `num_workers` argument. This spins
up multiple subprocesses to load data in parallel. This feature is recent and still somewhat experimental - please file
an issue if you encounter any bugs while using it!
### When to use to_tf_dataset
The astute reader may have noticed at this point that we have offered two approaches to achieve the same goal - if you
want to pass your dataset to a TensorFlow model, you can either convert the dataset to a `Tensor` or `dict` of `Tensors`
using `.with_format('tf')`, or you can convert the dataset to a `tf.data.Dataset` with `to_tf_dataset()`. Either of these
can be passed to `model.fit()`, so which should you choose?
The key thing to recognize is that when you convert the whole dataset to `Tensor`s, it is static and fully loaded into
RAM. This is simple and convenient, but if any of the following apply, you should probably use `to_tf_dataset()`
instead:
- Your dataset is too large to fit in RAM. `to_tf_dataset()` streams only one batch at a time, so even very large
datasets can be handled with this method.
- You want to apply random transformations using `dataset.with_transform()` or the `collate_fn`. This is
common in several modalities, such as image augmentations when training vision models, or random masking when training
masked language models. Using `to_tf_dataset()` will apply those transformations
at the moment when a batch is loaded, which means the same samples will get different augmentations each time
they are loaded. This is usually what you want (see the sketch after this list).
- Your data has a variable dimension, such as input texts in NLP that consist of varying
numbers of tokens. When you create a batch with samples with a variable dimension, the standard solution is to
pad the shorter samples to the length of the longest one. When you stream samples from a dataset with `to_tf_dataset`,
you can apply this padding to each batch via your `collate_fn`. However, if you want to convert
such a dataset to dense `Tensor`s, then you will have to pad samples to the length of the longest sample in *the
entire dataset!* This can result in huge amounts of padding, which wastes memory and reduces your model's speed.
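Here is a hedged sketch of the second point, reusing the toy `inputs`/`labels` dataset from earlier: a custom `collate_fn` that randomly zeroes out roughly 10% of the input values every time a batch is drawn. The masking scheme itself is just an illustration.
```py
>>> import numpy as np
>>> def masking_collate_fn(examples):
...     # `examples` is a list of dicts; stack them and apply a fresh random mask per batch
...     inputs = np.stack([np.asarray(example["inputs"]) for example in examples])
...     labels = np.asarray([example["labels"] for example in examples])
...     inputs = np.where(np.random.rand(*inputs.shape) < 0.1, 0, inputs)
...     return {"inputs": inputs, "labels": labels}
>>> tf_ds = ds.to_tf_dataset(
...     columns=["inputs"],
...     label_cols=["labels"],
...     batch_size=2,
...     shuffle=True,
...     collate_fn=masking_collate_fn,
... )
```
Because the mask is drawn inside the `collate_fn`, the same sample gets a different mask on every epoch.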
### Caveats and limitations
Right now, `to_tf_dataset()` always returns a batched dataset - we will add support for unbatched datasets soon!
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/installation.md | # Installation
Before you start, you'll need to set up your environment and install the appropriate packages. π€ Datasets is tested on **Python 3.7+**.
<Tip>
If you want to use π€ Datasets with TensorFlow or PyTorch, you'll need to install them separately. Refer to the [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2-packages-are-available) or the [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) for the specific install command for your framework.
</Tip>
## Virtual environment
You should install π€ Datasets in a [virtual environment](https://docs.python.org/3/library/venv.html) to keep things tidy and avoid dependency conflicts.
1. Create and navigate to your project directory:
```bash
mkdir ~/my-project
cd ~/my-project
```
2. Start a virtual environment inside your directory:
```bash
python -m venv .env
```
3. Activate and deactivate the virtual environment with the following commands:
```bash
# Activate the virtual environment
source .env/bin/activate
# Deactivate the virtual environment
deactivate
```
Once you've created your virtual environment, you can install π€ Datasets in it.
## pip
The most straightforward way to install π€ Datasets is with pip:
```bash
pip install datasets
```
Run the following command to check if π€ Datasets has been properly installed:
```bash
python -c "from datasets import load_dataset; print(load_dataset('squad', split='train')[0])"
```
This command downloads version 1 of the [Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer/), loads the training split, and prints the first training example. You should see:
```python
{'answers': {'answer_start': [515], 'text': ['Saint Bernadette Soubirous']}, 'context': 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.', 'id': '5733be284776f41900661182', 'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?', 'title': 'University_of_Notre_Dame'}
```
## Audio
To work with audio datasets, you need to install the [`Audio`] feature as an extra dependency:
```bash
pip install datasets[audio]
```
<Tip warning={true}>
To decode mp3 files, you need to have at least version 1.1.0 of the `libsndfile` system library. Usually, it's bundled with the python [`soundfile`](https://github.com/bastibe/python-soundfile) package, which is installed as an extra audio dependency for π€ Datasets.
For Linux, the required version of `libsndfile` is bundled with `soundfile` starting from version 0.12.0. You can run the following command to determine which version of `libsndfile` is being used by `soundfile`:
```bash
python -c "import soundfile; print(soundfile.__libsndfile_version__)"
```
</Tip>
## Vision
To work with image datasets, you need to install the [`Image`] feature as an extra dependency:
```bash
pip install datasets[vision]
```
## source
Building π€ Datasets from source lets you make changes to the code base. To install from source, clone the repository and install with the following commands:
```bash
git clone https://github.com/huggingface/datasets.git
cd datasets
pip install -e .
```
Again, you can check if π€ Datasets was properly installed with the following command:
```bash
python -c "from datasets import load_dataset; print(load_dataset('squad', split='train')[0])"
```
## conda
π€ Datasets can also be installed from conda, a package management system:
```bash
conda install -c huggingface -c conda-forge datasets
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/dataset_script.mdx | # Create a dataset loading script
<Tip>
The dataset script is likely not needed if your dataset is in one of the following formats: CSV, JSON, JSON lines, text or Parquet.
With those formats, you should be able to load your dataset automatically with [`~datasets.load_dataset`],
as long as your dataset repository has a [required structure](./repository_structure).
</Tip>
Write a dataset script to load and share datasets that consist of data files in unsupported formats or require more complex data preparation.
This is a more advanced way to define a dataset than using [YAML metadata in the dataset card](./repository_structure#define-your-splits-in-yaml).
A dataset script is a Python file that defines the different configurations and splits of your dataset, as well as how to download and process the data.
The script can download data files from any website, or from the same dataset repository.
A dataset loading script should have the same name as the dataset repository or directory. For example, a repository named `my_dataset` should contain a `my_dataset.py` script. This way it can be loaded with:
```
my_dataset/
βββ README.md
βββ my_dataset.py
```
```py
>>> from datasets import load_dataset
>>> load_dataset("path/to/my_dataset")
```
The following guide includes instructions on how to:
- Add dataset metadata.
- Download data files.
- Generate samples.
- Generate dataset metadata.
- Upload a dataset to the Hub.
Open the [SQuAD dataset loading script](https://huggingface.co/datasets/squad/blob/main/squad.py) template to follow along on how to share a dataset.
<Tip>
To help you get started, try beginning with the dataset loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py)!
</Tip>
## Add dataset attributes
The first step is to add some information, or attributes, about your dataset in [`DatasetBuilder._info`]. The most important attributes you should specify are:
1. `DatasetInfo.description` provides a concise description of your dataset. The description informs the user what's in the dataset, how it was collected, and how it can be used for an NLP task.
2. `DatasetInfo.features` defines the name and type of each column in your dataset. This will also provide the structure for each example, so it is possible to create nested subfields in a column if you want. Take a look at [`Features`] for a full list of feature types you can use.
```py
datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"context": datasets.Value("string"),
"question": datasets.Value("string"),
"answers": datasets.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
}
)
```
3. `DatasetInfo.homepage` contains the URL to the dataset homepage so users can find more details about the dataset.
4. `DatasetInfo.citation` contains a BibTeX citation for the dataset.
After you've filled out all these fields in the template, it should look like the following example from the SQuAD loading script:
```py
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"context": datasets.Value("string"),
"question": datasets.Value("string"),
"answers": datasets.features.Sequence(
{"text": datasets.Value("string"), "answer_start": datasets.Value("int32"),}
),
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
homepage="https://rajpurkar.github.io/SQuAD-explorer/",
citation=_CITATION,
)
```
### Multiple configurations
In some cases, your dataset may have multiple configurations. For example, the [SuperGLUE](https://huggingface.co/datasets/super_glue) dataset is a collection of 5 datasets designed to evaluate language understanding tasks. π€ Datasets provides [`BuilderConfig`] which allows you to create different configurations for the user to select from.
Let's study the [SuperGLUE loading script](https://huggingface.co/datasets/super_glue/blob/main/super_glue.py) to see how you can define several configurations.
1. Create a [`BuilderConfig`] subclass with attributes about your dataset. These attributes can be the features of your dataset, label classes, and a URL to the data files.
```py
class SuperGlueConfig(datasets.BuilderConfig):
"""BuilderConfig for SuperGLUE."""
def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
"""BuilderConfig for SuperGLUE.
Args:
features: *list[string]*, list of the features that will appear in the
feature dict. Should not include "label".
data_url: *string*, url to download the zip file from.
citation: *string*, citation for the data set.
url: *string*, url for information about the data set.
label_classes: *list[string]*, the list of classes for the label if the
label is present as a string. Non-string labels will be cast to either
'False' or 'True'.
**kwargs: keyword arguments forwarded to super.
"""
# Version history:
# 1.0.2: Fixed non-nondeterminism in ReCoRD.
# 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
# the full release (v2.0).
# 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
# 0.0.2: Initial version.
super().__init__(version=datasets.Version("1.0.2"), **kwargs)
self.features = features
self.label_classes = label_classes
self.data_url = data_url
self.citation = citation
self.url = url
```
2. Create instances of your config class to specify the values of the attributes of each configuration. This gives you the flexibility to specify the name and description of each configuration. These subclass instances should be listed under `DatasetBuilder.BUILDER_CONFIGS`:
```py
class SuperGlue(datasets.GeneratorBasedBuilder):
"""The SuperGLUE benchmark."""
BUILDER_CONFIGS = [
SuperGlueConfig(
name="boolq",
description=_BOOLQ_DESCRIPTION,
features=["question", "passage"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
citation=_BOOLQ_CITATION,
url="https://github.com/google-research-datasets/boolean-questions",
),
...
...
SuperGlueConfig(
name="axg",
description=_AXG_DESCRIPTION,
features=["premise", "hypothesis"],
label_classes=["entailment", "not_entailment"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip",
citation=_AXG_CITATION,
url="https://github.com/rudinger/winogender-schemas",
),
```
3. Now, users can load a specific configuration of the dataset with the configuration `name`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('super_glue', 'boolq')
```
### Default configurations
Users must specify a configuration name when they load a dataset with multiple configurations. Otherwise, π€ Datasets will raise a `ValueError`, and prompt the user to select a configuration name. You can avoid this by setting a default dataset configuration with the `DEFAULT_CONFIG_NAME` attribute:
```py
class NewDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
]
DEFAULT_CONFIG_NAME = "first_domain"
```
<Tip warning={true}>
Only use a default configuration when it makes sense. Don't set one because it may be more convenient for the user to not specify a configuration when they load your dataset. For example, multi-lingual datasets often have a separate configuration for each language. An appropriate default may be an aggregated configuration that loads all the languages of the dataset if the user doesn't request a particular one.
</Tip>
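With a default configuration set, users can load the dataset without passing a configuration name (the repository id below is a placeholder):
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("username/new_dataset")  # loads the "first_domain" configuration
```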
## Download data files and organize splits
After you've defined the attributes of your dataset, the next step is to download the data files and organize them according to their splits.
1. Create a dictionary of URLs in the loading script that point to the original SQuAD data files:
```py
_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
_URLS = {
"train": _URL + "train-v1.1.json",
"dev": _URL + "dev-v1.1.json",
}
```
<Tip>
If the data files live in the same folder or repository of the dataset script, you can just pass the relative paths to the files instead of URLs.
</Tip>
2. [`DownloadManager.download_and_extract`] takes this dictionary and downloads the data files. Once the files are downloaded, use [`SplitGenerator`] to organize each split in the dataset. This is a simple class that contains:
- The `name` of each split. You should use the standard split names: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
- `gen_kwargs` provides the file paths to the data files to load for each split.
Your `DatasetBuilder._split_generators()` method should now look like this:
```py
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
urls_to_download = self._URLS
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
]
```
## Generate samples
At this point, you have:
- Added the dataset attributes.
- Provided instructions for how to download the data files.
- Organized the splits.
The next step is to actually generate the samples in each split.
1. `DatasetBuilder._generate_examples` takes the file path provided by `gen_kwargs` to read and parse the data files. You need to write a function that loads the data files and extracts the columns.
2. Your function should yield a tuple of an `id_`, and an example from the dataset.
```py
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logger.info("generating examples from = %s", filepath)
with open(filepath) as f:
squad = json.load(f)
for article in squad["data"]:
title = article.get("title", "").strip()
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
```
## (Optional) Generate dataset metadata
Adding dataset metadata is a great way to include information about your dataset. The metadata is stored in the dataset card `README.md` in YAML. It includes information like the number of examples required to confirm the dataset was correctly generated, and information about the dataset like its `features`.
Run the following command to generate your dataset metadata in `README.md` and make sure your new dataset loading script works correctly:
```
datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs
```
If your dataset loading script passed the test, you should now have a `README.md` file in your dataset folder containing a `dataset_info` field with some metadata.
## Upload to the Hub
Once your script is ready, [create a dataset card](dataset_card) and [upload it to the Hub](share).
Congratulations, you can now load your dataset from the Hub! π₯³
```py
>>> from datasets import load_dataset
>>> load_dataset("<username>/my_dataset")
```
## Advanced features
### Sharding
If your dataset is made of many big files, π€ Datasets automatically runs your script in parallel to make it super fast!
It can help if you have hundreds or thousands of TAR archives, or JSONL files like [oscar](https://huggingface.co/datasets/oscar/blob/main/oscar.py) for example.
To make it work, we consider lists of files in `gen_kwargs` to be shards.
Therefore π€ Datasets can automatically spawn several workers to run `_generate_examples` in parallel, and each worker is given a subset of shards to process.
```python
class MyShardedDataset(datasets.GeneratorBasedBuilder):
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
downloaded_files = dl_manager.download([f"data/shard_{i}.jsonl" for i in range(1024)])
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}),
]
def _generate_examples(self, filepaths):
# Each worker can be given a slice of the original `filepaths` list defined in the `gen_kwargs`
# so that this code can run in parallel on several shards at the same time
for filepath in filepaths:
...
```
Users can also pass `num_proc=` to `load_dataset()` to set the number of processes to use as workers.
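For example (the script path is a placeholder):
```python
from datasets import load_dataset

# Spawn 8 worker processes; each one runs `_generate_examples` on its own subset of the shards
dataset = load_dataset("path/to/my_sharded_dataset", num_proc=8)
```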
### ArrowBasedBuilder
For some datasets it can be much faster to yield batches of data rather than examples one by one.
You can speed up the dataset generation by yielding Arrow tables directly, instead of examples.
This is especially useful if your data comes from Pandas DataFrames for example, since the conversion from Pandas to Arrow is as simple as:
```python
import pyarrow as pa
pa_table = pa.Table.from_pandas(df)
```
To yield Arrow tables instead of single examples, make your dataset builder inherit from [`ArrowBasedBuilder`] instead of [`GeneratorBasedBuilder`], and use `_generate_tables` instead of `_generate_examples`:
```python
class MySuperFastDataset(datasets.ArrowBasedBuilder):
def _generate_tables(self, filepaths):
idx = 0
for filepath in filepaths:
...
yield idx, pa_table
idx += 1
```
Don't forget to keep your script memory efficient, in case users run it on machines with a low amount of RAM.
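For instance, here is a hedged sketch of an Arrow-based builder that reads CSV shards with `pyarrow.csv` (the file layout is hypothetical, and `_info` is omitted for brevity):
```python
from typing import List

import datasets
import pyarrow.csv as pac


class MyCsvShardedDataset(datasets.ArrowBasedBuilder):
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        downloaded_files = dl_manager.download([f"data/shard_{i}.csv" for i in range(10)])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}),
        ]

    def _generate_tables(self, filepaths):
        for file_idx, filepath in enumerate(filepaths):
            # pyarrow parses the CSV directly into an Arrow table, without creating Python objects
            yield file_idx, pac.read_csv(filepath)
```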
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/_toctree.yml | - sections:
- local: index
title: π€ Datasets
- local: quickstart
title: Quickstart
- local: installation
title: Installation
title: Get started
- sections:
- local: tutorial
title: Overview
- local: load_hub
title: Load a dataset from the Hub
- local: access
title: Know your dataset
- local: use_dataset
title: Preprocess
- local: metrics
title: Evaluate predictions
- local: create_dataset
title: Create a dataset
- local: upload_dataset
title: Share a dataset to the Hub
title: "Tutorials"
- sections:
- local: how_to
title: Overview
- sections:
- local: loading
title: Load
- local: process
title: Process
- local: stream
title: Stream
- local: use_with_tensorflow
title: Use with TensorFlow
- local: use_with_pytorch
title: Use with PyTorch
- local: use_with_jax
title: Use with JAX
- local: use_with_spark
title: Use with Spark
- local: cache
title: Cache management
- local: filesystems
title: Cloud storage
- local: faiss_es
title: Search index
- local: how_to_metrics
title: Metrics
- local: beam
title: Beam Datasets
title: "General usage"
- sections:
- local: audio_load
title: Load audio data
- local: audio_process
title: Process audio data
- local: audio_dataset
title: Create an audio dataset
title: "Audio"
- sections:
- local: image_load
title: Load image data
- local: image_process
title: Process image data
- local: image_dataset
title: Create an image dataset
- local: depth_estimation
title: Depth estimation
- local: image_classification
title: Image classification
- local: semantic_segmentation
title: Semantic segmentation
- local: object_detection
title: Object detection
title: "Vision"
- sections:
- local: nlp_load
title: Load text data
- local: nlp_process
title: Process text data
title: "Text"
- sections:
- local: tabular_load
title: Load tabular data
title: "Tabular"
- sections:
- local: share
title: Share
- local: dataset_card
title: Create a dataset card
- local: repository_structure
title: Structure your repository
- local: dataset_script
title: Create a dataset loading script
title: "Dataset repository"
title: "How-to guides"
- sections:
- local: about_arrow
title: Datasets π€ Arrow
- local: about_cache
title: The cache
- local: about_mapstyle_vs_iterable
title: Dataset or IterableDataset
- local: about_dataset_features
title: Dataset features
- local: about_dataset_load
title: Build and load
- local: about_map_batch
title: Batch mapping
- local: about_metrics
title: All about metrics
title: "Conceptual guides"
- sections:
- local: package_reference/main_classes
title: Main classes
- local: package_reference/builder_classes
title: Builder classes
- local: package_reference/loading_methods
title: Loading methods
- local: package_reference/table_classes
title: Table Classes
- local: package_reference/logging_methods
title: Logging methods
- local: package_reference/task_templates
title: Task templates
title: "Reference"
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/about_dataset_load.mdx | # Build and load
Nearly every deep learning workflow begins with loading a dataset, which makes it one of the most important steps. With π€ Datasets, there are more than 900 datasets available to help you get started with your NLP task. All you have to do is call: [`load_dataset`] to take your first step. This function is a true workhorse in every sense because it builds and loads every dataset you use.
## ELI5: `load_dataset`
Let's begin with a basic Explain Like I'm Five.
A dataset is a directory that contains:
- Some data files in generic formats (JSON, CSV, Parquet, text, etc.)
- A dataset card named `README.md` that contains documentation about the dataset as well as a YAML header to define the datasets tags and configurations
- An optional dataset script if it requires some code to read the data files. This is sometimes used to load files of specific formats and structures.
The [`load_dataset`] function fetches the requested dataset locally or from the Hugging Face Hub.
The Hub is a central repository where all the Hugging Face datasets and models are stored.
If the dataset only contains data files, then [`load_dataset`] automatically infers how to load the data files from their extensions (json, csv, parquet, txt, etc.).
Under the hood, π€ Datasets will use an appropriate [`DatasetBuilder`] based on the data files format. There is one builder per data file format in π€ Datasets:
* [`datasets.packaged_modules.text.Text`] for text
* [`datasets.packaged_modules.csv.Csv`] for CSV and TSV
* [`datasets.packaged_modules.json.Json`] for JSON and JSONL
* [`datasets.packaged_modules.parquet.Parquet`] for Parquet
* [`datasets.packaged_modules.arrow.Arrow`] for Arrow (streaming file format)
* [`datasets.packaged_modules.sql.Sql`] for SQL databases
* [`datasets.packaged_modules.imagefolder.ImageFolder`] for image folders
* [`datasets.packaged_modules.audiofolder.AudioFolder`] for audio folders
If the dataset has a dataset script, then it downloads and imports it from the Hugging Face Hub.
Code in the dataset script defines a custom [`DatasetBuilder`] with the dataset information (description, features, URL to the original files, etc.), and tells π€ Datasets how to generate and display examples from it.
<Tip>
Read the [Share](./upload_dataset) section to learn more about how to share a dataset. This section also provides a step-by-step guide on how to write your own dataset loading script!
</Tip>
π€ Datasets downloads the dataset files from the original URL, generates the dataset and caches it in an Arrow table on your drive.
If you've downloaded the dataset before, then π€ Datasets will reload it from the cache to save you the trouble of downloading it again.
Now that you have a high-level understanding about how datasets are built, let's take a closer look at the nuts and bolts of how all this works.
## Building a dataset
When you load a dataset for the first time, π€ Datasets takes the raw data file and builds it into a table of rows and typed columns. There are two main classes responsible for building a dataset: [`BuilderConfig`] and [`DatasetBuilder`].
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/builderconfig.png"/>
</div>
### BuilderConfig[[datasets-builderconfig]]
[`BuilderConfig`] is the configuration class of [`DatasetBuilder`]. The [`BuilderConfig`] contains the following basic attributes about a dataset:
| Attribute | Description |
|---------------|--------------------------------------------------------------|
| `name` | Short name of the dataset. |
| `version` | Dataset version identifier. |
| `data_dir` | Stores the path to a local folder containing the data files. |
| `data_files` | Stores paths to local data files. |
| `description` | Description of the dataset. |
If you want to add additional attributes to your dataset such as the class labels, you can subclass the base [`BuilderConfig`] class. There are two ways to populate the attributes of a [`BuilderConfig`] class or subclass:
- Provide a list of predefined [`BuilderConfig`] class (or subclass) instances in the datasets [`DatasetBuilder.BUILDER_CONFIGS`] attribute.
- When you call [`load_dataset`], any keyword arguments that are not specific to the method will be used to set the associated attributes of the [`BuilderConfig`] class. This will override the predefined attributes if a specific configuration was selected.
You can also set the [`DatasetBuilder.BUILDER_CONFIG_CLASS`] to any custom subclass of [`BuilderConfig`].
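For example, extra keyword arguments passed to [`load_dataset`] end up as attributes of the builder's [`BuilderConfig`] (the file name below is a placeholder):
```python
from datasets import load_dataset

# `sep` is not a `load_dataset` parameter, so it populates the `sep` attribute of the csv BuilderConfig
dataset = load_dataset("csv", data_files="my_file.csv", sep=";")
```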
### DatasetBuilder[[datasets-datasetbuilder]]
[`DatasetBuilder`] accesses all the attributes inside [`BuilderConfig`] to build the actual dataset.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/datasetbuilder.png"/>
</div>
There are three main methods in [`DatasetBuilder`]:
1. [`DatasetBuilder._info`] is in charge of defining the dataset attributes. When you call `dataset.info`, π€ Datasets returns the information stored here. Likewise, the [`Features`] are also specified here. Remember, the [`Features`] are like the skeleton of the dataset. It provides the names and types of each column.
2. [`DatasetBuilder._split_generators`] downloads or retrieves the requested data files, organizes them into splits, and defines specific arguments for the generation process. This method has a [`DownloadManager`] that downloads files or fetches them from your local filesystem. Within the [`DownloadManager`], there is a [`DownloadManager.download_and_extract`] method that accepts a dictionary of URLs to the original data files, and downloads the requested files. Accepted inputs include: a single URL or path, or a list/dictionary of URLs or paths. Any compressed file types like TAR, GZIP and ZIP archives will be automatically extracted.
Once the files are downloaded, [`SplitGenerator`] organizes them into splits. The [`SplitGenerator`] contains the name of the split, and any keyword arguments that are provided to the [`DatasetBuilder._generate_examples`] method. The keyword arguments can be specific to each split, and typically comprise at least the local path to the data files for each split.
<Tip>
[`DownloadManager.download_and_extract`] can download files from a wide range of sources. If the data files are hosted on a special access server, you should use [`DownloadManager.download_custom`]. Refer to the reference of [`DownloadManager`] for more details.
</Tip>
3. [`DatasetBuilder._generate_examples`] reads and parses the data files for a split. Then it yields dataset examples according to the format specified in the `features` from [`DatasetBuilder._info`]. The input of [`DatasetBuilder._generate_examples`] is actually the `filepath` provided in the keyword arguments of the last method.
The dataset is generated with a Python generator, which doesn't load all the data in memory. As a result, the generator can handle large datasets. However, before the generated samples are flushed to the dataset file on disk, they are stored in an `ArrowWriter` buffer. This means the generated samples are written by batch. If your dataset samples consume a lot of memory (images or videos), then make sure to specify a low value for the `DEFAULT_WRITER_BATCH_SIZE` attribute in [`DatasetBuilder`]. We recommend not exceeding a size of 200 MB.
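A minimal sketch of what this looks like in a builder that yields large samples:
```python
import datasets


class MyLargeSamplesDataset(datasets.GeneratorBasedBuilder):
    # Flush generated examples to disk more often so the ArrowWriter buffer stays small
    DEFAULT_WRITER_BATCH_SIZE = 100
```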
## Maintaining integrity
To ensure a dataset is complete, [`load_dataset`] will perform a series of tests on the downloaded files to make sure everything is there. This way, you don't encounter any surprises when your requested dataset doesn't get generated as expected. [`load_dataset`] verifies:
- The number of splits in the generated `DatasetDict`.
- The number of samples in each split of the generated `DatasetDict`.
- The list of downloaded files.
- The SHA256 checksums of the downloaded files (disabled by default).
If the dataset doesn't pass the verifications, it is likely that the original host of the dataset made some changes in the data files.
<Tip>
If it is your own dataset, you'll need to recompute the information above and update the `README.md` file in your dataset repository. Take a look at this [section](dataset_script#optional-generate-dataset-metadata) to learn how to generate and update this metadata.
</Tip>
In this case, an error is raised to alert that the dataset has changed.
To ignore the error, one needs to specify `verification_mode="no_checks"` in [`load_dataset`].
Anytime you see a verification error, feel free to open a discussion or pull request in the corresponding dataset "Community" tab, so that the integrity checks for that dataset are updated.
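For example (the repository id is a placeholder):
```python
from datasets import load_dataset

# Skip the split, size, and checksum verifications
dataset = load_dataset("username/my_dataset", verification_mode="no_checks")
```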
## Security
The dataset repositories on the Hub are scanned for malware, see more information [here](https://huggingface.co/docs/hub/security#malware-scanning).
Moreover the datasets without a namespace (originally contributed on our GitHub repository) have all been reviewed by our maintainers.
The code of these datasets is considered **safe**.
It concerns datasets that are not under a namespace, e.g. "squad" or "glue", unlike the other datasets that are named "username/dataset_name" or "org/dataset_name".
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/tutorial.md | # Overview
Welcome to the π€ Datasets tutorials! These beginner-friendly tutorials will guide you through the fundamentals of working with π€ Datasets. You'll load and prepare a dataset for training with your machine learning framework of choice. Along the way, you'll learn how to load different dataset configurations and splits, interact with and see what's inside your dataset, preprocess, and share a dataset to the [Hub](https://huggingface.co/datasets).
The tutorials assume some basic knowledge of Python and a machine learning framework like PyTorch or TensorFlow. If you're already familiar with these, feel free to check out the [quickstart](./quickstart) to see what you can do with π€ Datasets.
<Tip>
The tutorials only cover the basic skills you need to use π€ Datasets. There are many other useful functionalities and applications that aren't discussed here. If you're interested in learning more, take a look at [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course.
</Tip>
If you have any questions about π€ Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
Let's get started! π
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/about_cache.mdx | # The cache
The cache is one of the reasons why π€ Datasets is so efficient. It stores previously downloaded and processed datasets so when you need to use them again, they are reloaded directly from the cache. This avoids having to download a dataset all over again, or reapplying processing functions. Even after you close and start another Python session, π€ Datasets will reload your dataset directly from the cache!
## Fingerprint
How does the cache keep track of what transforms are applied to a dataset? Well, π€ Datasets assigns a fingerprint to the cache file. A fingerprint keeps track of the current state of a dataset. The initial fingerprint is computed using a hash from the Arrow table, or a hash of the Arrow files if the dataset is on disk. Subsequent fingerprints are computed by combining the fingerprint of the previous state and a hash of the latest transform applied.
<Tip>
Transforms are any of the processing methods from the [How-to Process](./process) guides such as [`Dataset.map`] or [`Dataset.shuffle`].
</Tip>
Here are what the actual fingerprints look like:
```py
>>> from datasets import Dataset
>>> dataset1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> dataset2 = dataset1.map(lambda x: {"a": x["a"] + 1})
>>> print(dataset1._fingerprint, dataset2._fingerprint)
d19493523d95e2dc 5b86abacd4b42434
```
In order for a transform to be hashable, it needs to be picklable by [dill](https://dill.readthedocs.io/en/latest/) or [pickle](https://docs.python.org/3/library/pickle).
When you use a non-hashable transform, π€ Datasets uses a random fingerprint instead and raises a warning. The non-hashable transform is considered different from the previous transforms. As a result, π€ Datasets will recompute all the transforms. Make sure your transforms are serializable with pickle or dill to avoid this!
An example of when π€ Datasets recomputes everything is when caching is disabled. When this happens, the cache files are generated every time and they get written to a temporary directory. Once your Python session ends, the cache files in the temporary directory are deleted. A random hash is assigned to these cache files, instead of a fingerprint.
<Tip>
When caching is disabled, use [`Dataset.save_to_disk`] to save your transformed dataset or it will be deleted once the session ends.
</Tip>
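A minimal sketch, assuming the output path is up to you:
```py
>>> from datasets import Dataset, disable_caching
>>> disable_caching()
>>> dataset = Dataset.from_dict({"a": [0, 1, 2]})
>>> dataset = dataset.map(lambda x: {"a": x["a"] + 1})  # cache files go to a temporary directory
>>> dataset.save_to_disk("path/to/my_transformed_dataset")  # persist it before the session ends
```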
## Hashing
The fingerprint of a dataset is updated by hashing the function passed to `map` as well as the `map` parameters (`batch_size`, `remove_columns`, etc.).
You can check the hash of any Python object using the [`fingerprint.Hasher`]:
```py
>>> from datasets.fingerprint import Hasher
>>> my_func = lambda example: {"length": len(example["text"])}
>>> print(Hasher.hash(my_func))
'3d35e2b3e94c81d6'
```
The hash is computed by dumping the object using a `dill` pickler and hashing the dumped bytes.
The pickler recursively dumps all the variables used in your function, so any change you do to an object that is used in your function, will cause the hash to change.
If one of your functions doesn't seem to have the same hash across sessions, it means at least one of its variables contains a Python object that is not deterministic.
When this happens, feel free to hash any object you find suspicious to try to find the object that caused the hash to change.
For example, if you use a list for which the order of its elements is not deterministic across sessions, then the hash won't be the same across sessions either.
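A quick way to investigate is to hash the suspicious object directly and compare the result across two sessions (the `vocab` set below is just for illustration):
```py
>>> from datasets.fingerprint import Hasher
>>> vocab = {"foo", "bar"}  # sets have no stable iteration order across sessions
>>> Hasher.hash(list(vocab))    # may change from one session to the next
>>> Hasher.hash(sorted(vocab))  # deterministic: sorting fixes the order
```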
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/repository_structure.mdx | # Structure your repository
To host and share your dataset, create a dataset repository on the Hugging Face Hub and upload your data files.
This guide will show you how to structure your dataset repository when you upload it.
A dataset with a supported structure and file format (`.txt`, `.csv`, `.parquet`, `.jsonl`, `.mp3`, `.jpg`, `.zip`, etc.) is loaded automatically with [`~datasets.load_dataset`], and it has a dataset viewer on its dataset page on the Hub.
## Main use-case
The simplest dataset structure has two files: `train.csv` and `test.csv` (this works with any supported file format).
Your repository will also contain a `README.md` file, the [dataset card](dataset_card) displayed on your dataset page.
```
my_dataset_repository/
βββ README.md
βββ train.csv
βββ test.csv
```
In this simple case, you'll get a dataset with two splits: `train` (containing examples from `train.csv`) and `test` (containing examples from `test.csv`).
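Loading the repository then gives you both splits (the repository id below is a placeholder):
```python
from datasets import load_dataset

dataset = load_dataset("username/my_dataset_repository")
train_ds = dataset["train"]  # examples from train.csv
test_ds = dataset["test"]    # examples from test.csv
```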
## Define your splits and subsets in YAML
### Splits
If you have multiple files and want to define which file goes into which split, you can use the YAML `configs` field at the top of your README.md.
For example, given a repository like this one:
```
my_dataset_repository/
βββ README.md
βββ data.csv
βββ holdout.csv
```
You can define your splits by adding the `configs` field in the YAML block at the top of your README.md:
```yaml
---
configs:
- config_name: default
data_files:
- split: train
path: "data.csv"
- split: test
path: "holdout.csv"
---
```
You can select multiple files per split using a list of paths:
```
my_dataset_repository/
βββ README.md
βββ data/
β βββ abc.csv
β βββ def.csv
βββ holdout/
βββ ghi.csv
```
```yaml
---
configs:
- config_name: default
data_files:
- split: train
path:
- "data/abc.csv"
- "data/def.csv"
- split: test
path: "holdout/ghi.csv"
---
```
Or you can use glob patterns to automatically list all the files you need:
```yaml
---
configs:
- config_name: default
data_files:
- split: train
path: "data/*.csv"
- split: test
path: "holdout/*.csv"
---
```
<Tip warning={true}>
Note that the `config_name` field is required even if you have a single configuration.
</Tip>
### Configurations
Your dataset might have several subsets of data that you want to be able to load separately. In that case you can define a list of configurations inside the `configs` field in YAML:
```
my_dataset_repository/
βββ README.md
βββ main_data.csv
βββ additional_data.csv
```
```yaml
---
configs:
- config_name: main_data
data_files: "main_data.csv"
- config_name: additional_data
data_files: "additional_data.csv"
---
```
Each configuration is shown separately on the Hugging Face Hub, and can be loaded by passing its name as a second parameter:
```python
from datasets import load_dataset
main_data = load_dataset("my_dataset_repository", "main_data")
additional_data = load_dataset("my_dataset_repository", "additional_data")
```
### Builder parameters
Not only `data_files`, but other builder-specific parameters can be passed via YAML, allowing for more flexibility on how to load the data while not requiring any custom code. For example, define which separator to use in which configuration to load your `csv` files:
```yaml
---
configs:
- config_name: tab
data_files: "main_data.csv"
sep: "\t"
- config_name: comma
data_files: "additional_data.csv"
sep: ","
---
```
Refer to [specific builders' documentation](./package_reference/builder_classes) to see what configuration parameters they have.
<Tip>
You can set a default configuration using `default: true`, e.g. you can run `main_data = load_dataset("my_dataset_repository")` if you set
```yaml
- config_name: main_data
data_files: "main_data.csv"
default: true
```
</Tip>
## Automatic splits detection
If no YAML is provided, π€ Datasets searches for certain patterns in the dataset repository to automatically infer the dataset splits.
The patterns are checked in a specific order, beginning with the custom filename split format and ending with treating all files as a single split if nothing else matches.
### Directory name
Your data files may also be placed into different directories named `train`, `test`, and `validation` where each directory contains the data files for that split:
```
my_dataset_repository/
βββ README.md
βββ data/
βββ train/
β βββ bees.csv
βββ test/
β βββ more_bees.csv
βββ validation/
βββ even_more_bees.csv
```
### Filename splits
If you don't have any non-traditional splits, then you can place the split name anywhere in the data file and it is automatically inferred. The only rule is that the split name must be delimited by non-word characters, like `test-file.csv` for example instead of `testfile.csv`. Supported delimiters include underscores, dashes, spaces, dots, and numbers.
For example, the following file names are all acceptable:
- train split: `train.csv`, `my_train_file.csv`, `train1.csv`
- validation split: `validation.csv`, `my_validation_file.csv`, `validation1.csv`
- test split: `test.csv`, `my_test_file.csv`, `test1.csv`
Here is an example where all the files are placed into a directory named `data`:
```
my_dataset_repository/
βββ README.md
βββ data/
βββ train.csv
βββ test.csv
βββ validation.csv
```
### Custom filename split
If your dataset splits have custom names that aren't `train`, `test`, or `validation`, then you can name your data files like `data/<split_name>-xxxxx-of-xxxxx.csv`.
Here is an example with three splits, `train`, `test`, and `random`:
```
my_dataset_repository/
βββ README.md
βββ data/
βββ train-00000-of-00003.csv
βββ train-00001-of-00003.csv
βββ train-00002-of-00003.csv
βββ test-00000-of-00001.csv
βββ random-00000-of-00003.csv
βββ random-00001-of-00003.csv
βββ random-00002-of-00003.csv
```
### Single split
If π€ Datasets can't find any of the above patterns, it treats all the files as a single train split. If your dataset splits aren't loading as expected, it may be due to an incorrect pattern.
### Split name keywords
There are several ways to name splits. Validation splits are sometimes called "dev", and test splits may be referred to as "eval".
These other split names are also supported, and the following keywords are equivalent:
- train, training
- validation, valid, val, dev
- test, testing, eval, evaluation
The structure below is a valid repository:
```
my_dataset_repository/
βββ README.md
βββ data/
βββ training.csv
βββ eval.csv
βββ valid.csv
```
### Multiple files per split
If one of your splits comprises several files, π€ Datasets can still infer whether it is the train, validation, or test split from the file names.
For example, if your train and test splits span several files:
```
my_dataset_repository/
βββ README.md
βββ train_0.csv
βββ train_1.csv
βββ train_2.csv
βββ train_3.csv
βββ test_0.csv
βββ test_1.csv
```
Make sure all the files of your `train` set have *train* in their names (same for test and validation).
Even if you add a prefix or suffix to `train` in the file name (like `my_train_file_00001.csv` for example),
π€ Datasets can still infer the appropriate split.
For convenience, you can also place your data files into different directories.
In this case, the split name is inferred from the directory name.
```
my_dataset_repository/
βββ README.md
βββ data/
βββ train/
β βββ shard_0.csv
β βββ shard_1.csv
β βββ shard_2.csv
β βββ shard_3.csv
βββ test/
βββ shard_0.csv
βββ shard_1.csv
```
For more flexibility over how to load and generate a dataset, you can also write a [dataset loading script](./dataset_script).
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/about_arrow.md | # Datasets π€ Arrow
## What is Arrow?
[Arrow](https://arrow.apache.org/) enables large amounts of data to be processed and moved quickly. It is a specific data format that stores data in a columnar memory layout. This provides several significant advantages:
* Arrow's standard format allows [zero-copy reads](https://en.wikipedia.org/wiki/Zero-copy) which removes virtually all serialization overhead.
* Arrow is language-agnostic so it supports different programming languages.
* Arrow is column-oriented so it is faster at querying and processing slices or columns of data.
* Arrow allows for copy-free hand-offs to standard machine learning tools such as NumPy, Pandas, PyTorch, and TensorFlow.
* Arrow supports many, possibly nested, column types.
## Memory-mapping
π€ Datasets uses Arrow for its local caching system. It allows datasets to be backed by an on-disk cache, which is memory-mapped for fast lookup.
This architecture allows for large datasets to be used on machines with relatively small device memory.
For example, loading the full English Wikipedia dataset only takes a few MB of RAM:
```python
>>> import os; import psutil; import timeit
>>> from datasets import load_dataset
# Process.memory_info is expressed in bytes, so convert to megabytes
>>> mem_before = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024)
>>> wiki = load_dataset("wikipedia", "20220301.en", split="train")
>>> mem_after = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024)
>>> print(f"RAM memory used: {(mem_after - mem_before)} MB")
RAM memory used: 50 MB
```
This is possible because the Arrow data is actually memory-mapped from disk, and not loaded in memory.
Memory-mapping allows access to data on disk, and leverages virtual memory capabilities for fast lookups.
## Performance
Iterating over a memory-mapped dataset using Arrow is fast. Iterating over Wikipedia on a laptop gives you speeds of 1-3 Gbit/s:
```python
>>> s = """batch_size = 1000
... for batch in wiki.iter(batch_size):
... ...
... """
>>> time = timeit.timeit(stmt=s, number=1, globals=globals())
>>> print(f"Time to iterate over the {wiki.dataset_size >> 30} GB dataset: {time:.1f} sec, "
... f"ie. {float(wiki.dataset_size >> 27)/time:.1f} Gb/s")
Time to iterate over the 18 GB dataset: 31.8 sec, ie. 4.8 Gb/s
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/nlp_load.mdx | # Load text data
This guide shows you how to load text datasets. To learn how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>.
Text files are one of the most common file types for storing a dataset. By default, π€ Datasets samples a text file line by line to build the dataset.
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("text", data_files={"train": ["my_text_1.txt", "my_text_2.txt"], "test": "my_test_file.txt"})
# Load from a directory
>>> dataset = load_dataset("text", data_dir="path/to/text/dataset")
```
To sample a text file by paragraph or even an entire document, use the `sample_by` parameter:
```py
# Sample by paragraph
>>> dataset = load_dataset("text", data_files={"train": "my_train_file.txt", "test": "my_test_file.txt"}, sample_by="paragraph")
# Sample by document
>>> dataset = load_dataset("text", data_files={"train": "my_train_file.txt", "test": "my_test_file.txt"}, sample_by="document")
```
You can also use wildcard patterns to load specific files:
```py
>>> from datasets import load_dataset
>>> c4_subset = load_dataset("allenai/c4", data_files="en/c4-train.0000*-of-01024.json.gz")
```
To load remote text files via HTTP, pass the URLs instead:
```py
>>> dataset = load_dataset("text", data_files="https://huggingface.co/datasets/lhoestq/test/resolve/main/some_text.txt")
``` | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/nlp_process.mdx | # Process text data
This guide shows specific methods for processing text datasets. Learn how to:
- Tokenize a dataset with [`~Dataset.map`].
- Align dataset labels with label ids for NLI datasets.
For a guide on how to process any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./process">general process guide</a>.
## Map
The [`~Dataset.map`] function supports processing batches of examples at once which speeds up tokenization.
Load a tokenizer from 🤗 [Transformers](https://huggingface.co/transformers/):
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
```
Set the `batched` parameter to `True` in the [`~Dataset.map`] function to apply the tokenizer to batches of examples:
```py
>>> dataset = dataset.map(lambda examples: tokenizer(examples["text"]), batched=True)
>>> dataset[0]
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'label': 1,
'input_ids': [101, 1996, 2600, 2003, 16036, 2000, 2022, 1996, 7398, 2301, 1005, 1055, 2047, 1000, 16608, 1000, 1998, 2008, 2002, 1005, 1055, 2183, 2000, 2191, 1037, 17624, 2130, 3618, 2084, 7779, 29058, 8625, 13327, 1010, 3744, 1011, 18856, 19513, 3158, 5477, 4168, 2030, 7112, 16562, 2140, 1012, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
The [`~Dataset.map`] function converts the returned values to a PyArrow-supported format. But explicitly returning the tensors as NumPy arrays is faster because NumPy arrays are a natively supported PyArrow format. Set `return_tensors="np"` when you tokenize your text:
```py
>>> dataset = dataset.map(lambda examples: tokenizer(examples["text"], return_tensors="np"), batched=True)
```
## Align
The [`~Dataset.align_labels_with_mapping`] function aligns a dataset label id with the label name. Not all 🤗 Transformers models follow the prescribed label mapping of the original dataset, especially for NLI datasets. For example, the [MNLI](https://huggingface.co/datasets/glue) dataset uses the following label mapping:
```py
>>> label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
```
To align the dataset label mapping with the mapping used by a model, create a dictionary of the label name and id to align on:
```py
>>> label2id = {"contradiction": 0, "neutral": 1, "entailment": 2}
```
Pass the dictionary of the label mappings to the [`~Dataset.align_labels_with_mapping`] function, and the column to align on:
```py
>>> from datasets import load_dataset
>>> mnli = load_dataset("glue", "mnli", split="train")
>>> mnli_aligned = mnli.align_labels_with_mapping(label2id, "label")
```
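As a quick sanity check (an illustrative sketch, not part of the original guide), you can inspect the realigned label names afterwards; with the mapping above they come back ordered by their new ids:
```py
>>> mnli_aligned.features["label"].names
['contradiction', 'neutral', 'entailment']
```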
You can also use this function to assign a custom mapping of labels to ids. | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/image_dataset.mdx | # Create an image dataset
There are two methods for creating and sharing an image dataset. This guide will show you how to:
* Create an image dataset with `ImageFolder` and some metadata. This is a no-code solution for quickly creating an image dataset with several thousand images.
* Create an image dataset by writing a loading script. This method is a bit more involved, but you have greater flexibility over how a dataset is defined, downloaded, and generated which can be useful for more complex or large scale image datasets.
<Tip>
You can control access to your dataset by requiring users to share their contact information first. Check out the [Gated datasets](https://huggingface.co/docs/hub/datasets-gated) guide for more information about how to enable this feature on the Hub.
</Tip>
## ImageFolder
The `ImageFolder` is a dataset builder designed to quickly load an image dataset with several thousand images without requiring you to write any code.
<Tip>
π‘ Take a look at the [Split pattern hierarchy](repository_structure#split-pattern-hierarchy) to learn more about how `ImageFolder` creates dataset splits based on your dataset repository structure.
</Tip>
`ImageFolder` automatically infers the class labels of your dataset based on the directory name. Store your dataset in a directory structure like:
```
folder/train/dog/golden_retriever.png
folder/train/dog/german_shepherd.png
folder/train/dog/chihuahua.png
folder/train/cat/maine_coon.png
folder/train/cat/bengal.png
folder/train/cat/birman.png
```
Then users can load your dataset by specifying `imagefolder` in [`load_dataset`] and the directory in `data_dir`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder")
```
You can also use `imagefolder` to load datasets involving multiple splits. To do so, your dataset directory should have the following structure:
```
folder/train/dog/golden_retriever.png
folder/train/cat/maine_coon.png
folder/test/dog/german_shepherd.png
folder/test/cat/bengal.png
```
<Tip warning={true}>
If all image files are contained in a single directory or if they are not on the same level of directory structure, a `label` column won't be added automatically. If you need it, set `drop_labels=False` explicitly.
</Tip>
If there is additional information you'd like to include about your dataset, like text captions or bounding boxes, add it as a `metadata.csv` file in your folder. This lets you quickly create datasets for different computer vision tasks like text captioning or object detection. You can also use a JSONL file `metadata.jsonl`.
```
folder/train/metadata.csv
folder/train/0001.png
folder/train/0002.png
folder/train/0003.png
```
You can also zip your images:
```
folder/metadata.csv
folder/train.zip
folder/test.zip
folder/valid.zip
```
Your `metadata.csv` file must have a `file_name` column which links image files with their metadata:
```csv
file_name,additional_feature
0001.png,This is a first value of a text feature you added to your images
0002.png,This is a second value of a text feature you added to your images
0003.png,This is a third value of a text feature you added to your images
```
or using `metadata.jsonl`:
```jsonl
{"file_name": "0001.png", "additional_feature": "This is a first value of a text feature you added to your images"}
{"file_name": "0002.png", "additional_feature": "This is a second value of a text feature you added to your images"}
{"file_name": "0003.png", "additional_feature": "This is a third value of a text feature you added to your images"}
```
<Tip>
If metadata files are present, the inferred labels based on the directory name are dropped by default. To include those labels, set `drop_labels=False` in `load_dataset`.
</Tip>
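For example, a minimal sketch (with a placeholder path) that keeps the directory-based labels alongside the metadata:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", drop_labels=False)
```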
### Image captioning
Image captioning datasets have text describing an image. An example `metadata.csv` may look like:
```csv
file_name,text
0001.png,This is a golden retriever playing with a ball
0002.png,A german shepherd
0003.png,One chihuahua
```
Load the dataset with `ImageFolder`, and it will create a `text` column for the image captions:
```py
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
>>> dataset[0]["text"]
"This is a golden retriever playing with a ball"
```
### Object detection
Object detection datasets have bounding boxes and categories identifying objects in an image. An example `metadata.jsonl` may look like:
```jsonl
{"file_name": "0001.png", "objects": {"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}}
{"file_name": "0002.png", "objects": {"bbox": [[810.0, 100.0, 57.0, 28.0]], "categories": [1]}}
{"file_name": "0003.png", "objects": {"bbox": [[160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], "categories": [2, 2]}}
```
Load the dataset with `ImageFolder`, and it will create a `objects` column with the bounding boxes and the categories:
```py
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
>>> dataset[0]["objects"]
{"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}
```
### Upload dataset to the Hub
Once you've created a dataset, you can share it to the Hub with the [`~datasets.DatasetDict.push_to_hub`] method. Make sure you have the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library installed and you're logged in to your Hugging Face account (see the [Upload with Python tutorial](upload_dataset#upload-with-python) for more details).
Upload your dataset with [`~datasets.DatasetDict.push_to_hub`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
>>> dataset.push_to_hub("stevhliu/my-image-captioning-dataset")
```
## Loading script
Write a dataset loading script to share a dataset. It defines a dataset's splits and configurations, and handles downloading and generating a dataset. The script is located in the same folder or repository as the dataset and should have the same name.
```
my_dataset/
├── README.md
├── my_dataset.py
└── data/  # optional, may contain your images or TAR archives
```
This structure allows your dataset to be loaded in one line:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("path/to/my_dataset")
```
This guide will show you how to create a dataset loading script for image datasets, which is a bit different from <a class="underline decoration-green-400 decoration-2 font-semibold" href="./dataset_script">creating a loading script for text datasets</a>. You'll learn how to:
* Create a dataset builder class.
* Create dataset configurations.
* Add dataset metadata.
* Download and define the dataset splits.
* Generate the dataset.
* Generate the dataset metadata (optional).
* Upload the dataset to the Hub.
The best way to learn is to open up an existing image dataset loading script, like [Food-101](https://huggingface.co/datasets/food101/blob/main/food101.py), and follow along!
<Tip>
To help you get started, we created a loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py) you can copy and use as a starting point!
</Tip>
### Create a dataset builder class
[`GeneratorBasedBuilder`] is the base class for datasets generated from a dictionary generator. Within this class, there are three methods to help create your dataset:
* `info` stores information about your dataset like its description, license, and features.
* `split_generators` downloads the dataset and defines its splits.
* `generate_examples` generates the images and labels for each split.
Start by creating your dataset class as a subclass of [`GeneratorBasedBuilder`] and add the three methods. Don't worry about filling in each of these methods yet, you'll develop those over the next few sections:
```py
class Food101(datasets.GeneratorBasedBuilder):
"""Food-101 Images dataset"""
def _info(self):
def _split_generators(self, dl_manager):
def _generate_examples(self, images, metadata_path):
```
#### Multiple configurations
In some cases, a dataset may have more than one configuration. For example, if you check out the [Imagenette dataset](https://huggingface.co/datasets/frgfm/imagenette), you'll notice there are three subsets.
To create different configurations, use the [`BuilderConfig`] class to create a subclass for your dataset. Provide the links to download the images and labels in `data_url` and `metadata_urls`:
```py
class Food101Config(datasets.BuilderConfig):
"""Builder Config for Food-101"""
def __init__(self, data_url, metadata_urls, **kwargs):
"""BuilderConfig for Food-101.
Args:
data_url: `string`, url to download the zip file from.
metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
**kwargs: keyword arguments forwarded to super.
"""
super(Food101Config, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.data_url = data_url
self.metadata_urls = metadata_urls
```
Now you can define your subsets at the top of [`GeneratorBasedBuilder`]. Imagine you want to create two subsets in the Food-101 dataset based on whether it is a breakfast or dinner food.
1. Define your subsets with `Food101Config` in a list in `BUILDER_CONFIGS`.
2. For each configuration, provide a name, description, and where to download the images and labels from.
```py
class Food101(datasets.GeneratorBasedBuilder):
"""Food-101 Images dataset"""
BUILDER_CONFIGS = [
Food101Config(
name="breakfast",
description="Food types commonly eaten during breakfast.",
data_url="https://link-to-breakfast-foods.zip",
metadata_urls={
"train": "https://link-to-breakfast-foods-train.txt",
"validation": "https://link-to-breakfast-foods-validation.txt"
},
),
Food101Config(
name="dinner",
description="Food types commonly eaten during dinner.",
data_url="https://link-to-dinner-foods.zip",
metadata_urls={
"train": "https://link-to-dinner-foods-train.txt",
"validation": "https://link-to-dinner-foods-validation.txt"
},
),
]
```
Now if users want to load the `breakfast` configuration, they can use the configuration name:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("food101", "breakfast", split="train")
```
### Add dataset metadata
Adding information about your dataset is useful for users to learn more about it. This information is stored in the [`DatasetInfo`] class which is returned by the `info` method. Users can access this information by:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder("food101")
>>> ds_builder.info
```
There is a lot of information you can specify about your dataset, but some important ones to include are:
1. `description` provides a concise description of the dataset.
2. `features` specify the dataset column types. Since you're creating an image loading script, you'll need to include the [`Image`] feature.
3. `supervised_keys` specify the input feature and label.
4. `homepage` provides a link to the dataset homepage.
5. `citation` is a BibTeX citation of the dataset.
6. `license` states the dataset's license.
<Tip>
You'll notice a lot of the dataset information is defined earlier in the loading script which makes it easier to read. There are also other [`Features`] you can input, so be sure to check out the full list for more details.
</Tip>
```py
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"label": datasets.ClassLabel(names=_NAMES),
}
),
supervised_keys=("image", "label"),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
task_templates=[ImageClassification(image_column="image", label_column="label")],
)
```
### Download and define the dataset splits
Now that you've added some information about your dataset, the next step is to download the dataset and generate the splits.
1. Use the [`DownloadManager.download`] method to download the dataset and any other metadata you'd like to associate with it. This method accepts:
* a relative path to a file inside a Hub dataset repository (in other words, the `data/` folder)
* a URL to a file hosted somewhere else
* a list or dictionary of file names or URLs
In the Food-101 loading script, you'll notice again the URLs are defined earlier in the script.
2. After you've downloaded the dataset, use the [`SplitGenerator`] to organize the images and labels in each split. Name each split with a standard name like: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
In the `gen_kwargs` parameter, specify the file paths to the `images` to iterate over and load. If necessary, you can use [`DownloadManager.iter_archive`] to iterate over images in TAR archives. You can also specify the associated labels in the `metadata_path`. The `images` and `metadata_path` are passed on to the next step, where you'll actually generate the dataset.
<Tip warning={true}>
To stream a TAR archive file, you need to use [`DownloadManager.iter_archive`]! The [`DownloadManager.download_and_extract`] function does not support TAR archives in streaming mode.
</Tip>
```py
def _split_generators(self, dl_manager):
archive_path = dl_manager.download(_BASE_URL)
split_metadata_paths = dl_manager.download(_METADATA_URLS)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"images": dl_manager.iter_archive(archive_path),
"metadata_path": split_metadata_paths["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"images": dl_manager.iter_archive(archive_path),
"metadata_path": split_metadata_paths["test"],
},
),
]
```
### Generate the dataset
The last method in the [`GeneratorBasedBuilder`] class actually generates the images and labels in the dataset. It yields a dataset according to the structure specified in `features` from the `info` method. As you can see, `generate_examples` accepts the `images` and `metadata_path` from the previous method as arguments.
<Tip warning={true}>
To stream a TAR archive file, the `metadata_path` needs to be opened and read first. TAR files are accessed and yielded sequentially. This means you need to have the metadata information in hand first so you can yield it with its corresponding image.
</Tip>
Now you can write a function for opening and loading examples from the dataset:
```py
def _generate_examples(self, images, metadata_path):
"""Generate images and labels for splits."""
with open(metadata_path, encoding="utf-8") as f:
files_to_keep = set(f.read().split("\n"))
for file_path, file_obj in images:
if file_path.startswith(_IMAGES_DIR):
if file_path[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep:
label = file_path.split("/")[2]
yield file_path, {
"image": {"path": file_path, "bytes": file_obj.read()},
"label": label,
}
```
### Generate the dataset metadata (optional)
The dataset metadata can be generated and stored in the dataset card (`README.md` file).
Run the following command to generate your dataset metadata in `README.md` and make sure your new loading script works correctly:
```bash
datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs
```
If your loading script passed the test, you should now have the `dataset_info` YAML fields in the header of the `README.md` file in your dataset folder.
### Upload the dataset to the Hub
Once your script is ready, [create a dataset card](./dataset_card) and [upload it to the Hub](./share).
Congratulations, you can now load your dataset from the Hub! π₯³
```py
>>> from datasets import load_dataset
>>> load_dataset("<username>/my_dataset")
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/load_hub.mdx | # Load a dataset from the Hub
Finding high-quality datasets that are reproducible and accessible can be difficult. One of 🤗 Datasets' main goals is to provide a simple way to load a dataset of any format or type. The easiest way to get started is to discover an existing dataset on the [Hugging Face Hub](https://huggingface.co/datasets) - a community-driven collection of datasets for tasks in NLP, computer vision, and audio - and use 🤗 Datasets to download and generate the dataset.
This tutorial uses the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) and [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) datasets, but feel free to load any dataset you want and follow along. Head over to the Hub now and find a dataset for your task!
## Load a dataset
Before you take the time to download a dataset, it's often helpful to quickly get some general information about a dataset. A dataset's information is stored inside [`DatasetInfo`] and can include information such as the dataset description, features, and dataset size.
Use the [`load_dataset_builder`] function to load a dataset builder and inspect a dataset's attributes without committing to downloading it:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder("rotten_tomatoes")
# Inspect dataset description
>>> ds_builder.info.description
Movie Review Dataset. This is a dataset of containing 5,331 positive and 5,331 negative processed sentences from Rotten Tomatoes movie reviews. This data was first used in Bo Pang and Lillian Lee, ``Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales.'', Proceedings of the ACL, 2005.
# Inspect dataset features
>>> ds_builder.info.features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
```
If you're happy with the dataset, then load it with [`load_dataset`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("rotten_tomatoes", split="train")
```
## Splits
A split is a specific subset of a dataset like `train` and `test`. List a dataset's split names with the [`get_dataset_split_names`] function:
```py
>>> from datasets import get_dataset_split_names
>>> get_dataset_split_names("rotten_tomatoes")
['train', 'validation', 'test']
```
Then you can load a specific split with the `split` parameter. Loading a dataset `split` returns a [`Dataset`] object:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("rotten_tomatoes", split="train")
>>> dataset
Dataset({
features: ['text', 'label'],
num_rows: 8530
})
```
If you don't specify a `split`, 🤗 Datasets returns a [`DatasetDict`] object instead:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("rotten_tomatoes")
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 8530
})
validation: Dataset({
features: ['text', 'label'],
num_rows: 1066
})
test: Dataset({
features: ['text', 'label'],
num_rows: 1066
})
})
```
## Configurations
Some datasets contain several sub-datasets. For example, the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset has several sub-datasets, each one containing audio data in a different language. These sub-datasets are known as *configurations*, and you must explicitly select one when loading the dataset. If you don't provide a configuration name, 🤗 Datasets will raise a `ValueError` and remind you to choose a configuration.
Use the [`get_dataset_config_names`] function to retrieve a list of all the possible configurations available to your dataset:
```py
>>> from datasets import get_dataset_config_names
>>> configs = get_dataset_config_names("PolyAI/minds14")
>>> print(configs)
['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN', 'all']
```
Then load the configuration you want:
```py
>>> from datasets import load_dataset
>>> mindsFR = load_dataset("PolyAI/minds14", "fr-FR", split="train")
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/filesystems.mdx | # Cloud storage
🤗 Datasets supports access to cloud storage providers through `fsspec` FileSystem implementations.
You can save and load datasets from any cloud storage in a Pythonic way.
Take a look at the following table for some examples of supported cloud storage providers:
| Storage provider | Filesystem implementation |
|----------------------|---------------------------------------------------------------|
| Amazon S3 | [s3fs](https://s3fs.readthedocs.io/en/latest/) |
| Google Cloud Storage | [gcsfs](https://gcsfs.readthedocs.io/en/latest/) |
| Azure Blob/DataLake | [adlfs](https://github.com/fsspec/adlfs) |
| Dropbox | [dropboxdrivefs](https://github.com/MarineChap/dropboxdrivefs)|
| Google Drive | [gdrivefs](https://github.com/intake/gdrivefs) |
| Oracle Cloud Storage | [ocifs](https://ocifs.readthedocs.io/en/latest/) |
This guide will show you how to save and load datasets with any cloud storage.
Here are examples for S3, Google Cloud Storage, Azure Blob Storage, and Oracle Cloud Object Storage.
## Set up your cloud storage FileSystem
### Amazon S3
1. Install the S3 FileSystem implementation:
```
>>> pip install s3fs
```
2. Define your credentials
To use an anonymous connection, use `anon=True`.
Otherwise, include your `aws_access_key_id` and `aws_secret_access_key` whenever you are interacting with a private S3 bucket.
```py
>>> storage_options = {"anon": True} # for anonymous connection
# or use your credentials
>>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key} # for private buckets
# or use a botocore session
>>> import aiobotocore.session
>>> s3_session = aiobotocore.session.AioSession(profile="my_profile_name")
>>> storage_options = {"session": s3_session}
```
3. Create your FileSystem instance
```py
>>> import s3fs
>>> fs = s3fs.S3FileSystem(**storage_options)
```
### Google Cloud Storage
1. Install the Google Cloud Storage implementation:
```
>>> conda install -c conda-forge gcsfs
# or install with pip
>>> pip install gcsfs
```
2. Define your credentials
```py
>>> storage_options={"token": "anon"} # for anonymous connection
# or use your credentials of your default gcloud credentials or from the google metadata service
>>> storage_options={"project": "my-google-project"}
# or use your credentials from elsewhere, see the documentation at https://gcsfs.readthedocs.io/
>>> storage_options={"project": "my-google-project", "token": TOKEN}
```
3. Create your FileSystem instance
```py
>>> import gcsfs
>>> fs = gcsfs.GCSFileSystem(**storage_options)
```
### Azure Blob Storage
1. Install the Azure Blob Storage implementation:
```
>>> conda install -c conda-forge adlfs
# or install with pip
>>> pip install adlfs
```
2. Define your credentials
```py
>>> storage_options = {"anon": True} # for anonymous connection
# or use your credentials
>>> storage_options = {"account_name": ACCOUNT_NAME, "account_key": ACCOUNT_KEY} # gen 2 filesystem
# or use your credentials with the gen 1 filesystem
>>> storage_options={"tenant_id": TENANT_ID, "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET}
```
3. Create your FileSystem instance
```py
>>> import adlfs
>>> fs = adlfs.AzureBlobFileSystem(**storage_options)
```
### Oracle Cloud Object Storage
1. Install the OCI FileSystem implementation:
```
>>> pip install ocifs
```
2. Define your credentials
```py
>>> storage_options = {"config": "~/.oci/config", "region": "us-ashburn-1"}
```
3. Create your FileSystem instance
```py
>>> import ocifs
>>> fs = ocifs.OCIFileSystem(**storage_options)
```
## Load and Save your datasets using your cloud storage FileSystem
### Download and prepare a dataset into a cloud storage
You can download and prepare a dataset into your cloud storage by specifying a remote `output_dir` in `download_and_prepare`.
Don't forget to use the previously defined `storage_options` containing your credentials to write into a private cloud storage.
The `download_and_prepare` method works in two steps:
1. it first downloads the raw data files (if any) in your local cache. You can set your cache directory by passing `cache_dir` to [`load_dataset_builder`]
2. then it generates the dataset in Arrow or Parquet format in your cloud storage by iterating over the raw data files.
Load a dataset builder from the Hugging Face Hub (see [how to load from the Hugging Face Hub](./loading#hugging-face-hub)):
```py
>>> output_dir = "s3://my-bucket/imdb"
>>> builder = load_dataset_builder("imdb")
>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet")
```
Load a dataset builder using a loading script (see [how to load a local loading script](./loading#local-loading-script)):
```py
>>> output_dir = "s3://my-bucket/imdb"
>>> builder = load_dataset_builder("path/to/local/loading_script/loading_script.py")
>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet")
```
Use your own data files (see [how to load local and remote files](./loading#local-and-remote-files)):
```py
>>> data_files = {"train": ["path/to/train.csv"]}
>>> output_dir = "s3://my-bucket/imdb"
>>> builder = load_dataset_builder("csv", data_files=data_files)
>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet")
```
It is highly recommended to save the files as compressed Parquet files to optimize I/O by specifying `file_format="parquet"`.
Otherwise the dataset is saved as an uncompressed Arrow file.
You can also specify the size of the shards using `max_shard_size` (default is 500MB):
```py
>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet", max_shard_size="1GB")
```
#### Dask
Dask is a parallel computing library and it has a pandas-like API for working with larger than memory Parquet datasets in parallel.
Dask can use multiple threads or processes on a single machine, or a cluster of machines to process data in parallel.
Dask supports local data but also data from a cloud storage.
Therefore you can load a dataset saved as sharded Parquet files in Dask with
```py
import dask.dataframe as dd
df = dd.read_parquet(output_dir, storage_options=storage_options)
# or if your dataset is split into train/valid/test
df_train = dd.read_parquet(output_dir + f"/{builder.name}-train-*.parquet", storage_options=storage_options)
df_valid = dd.read_parquet(output_dir + f"/{builder.name}-validation-*.parquet", storage_options=storage_options)
df_test = dd.read_parquet(output_dir + f"/{builder.name}-test-*.parquet", storage_options=storage_options)
```
You can find more about dask dataframes in their [documentation](https://docs.dask.org/en/stable/dataframe.html).
## Saving serialized datasets
After you have processed your dataset, you can save it to your cloud storage with [`Dataset.save_to_disk`]:
```py
# saves encoded_dataset to amazon s3
>>> encoded_dataset.save_to_disk("s3://my-private-datasets/imdb/train", storage_options=storage_options)
# saves encoded_dataset to google cloud storage
>>> encoded_dataset.save_to_disk("gcs://my-private-datasets/imdb/train", storage_options=storage_options)
# saves encoded_dataset to microsoft azure blob/datalake
>>> encoded_dataset.save_to_disk("adl://my-private-datasets/imdb/train", storage_options=storage_options)
```
<Tip>
Remember to define your credentials in your [FileSystem instance](#set-up-your-cloud-storage-filesystem) `fs` whenever you are interacting with a private cloud storage.
</Tip>
## Listing serialized datasets
List files from a cloud storage with your FileSystem instance `fs`, using `fs.ls`:
```py
>>> fs.ls("my-private-datasets/imdb/train", detail=False)
["dataset_info.json.json","dataset.arrow","state.json"]
```
### Load serialized datasets
When you are ready to use your dataset again, reload it with [`Dataset.load_from_disk`]:
```py
>>> from datasets import load_from_disk
# load encoded_dataset from cloud storage
>>> dataset = load_from_disk("s3://my-private-datasets/imdb/train", storage_options=storage_options)
>>> print(len(dataset))
25000
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/use_with_jax.mdx | # Use with JAX
This document is a quick introduction to using `datasets` with JAX, with a particular focus on how to get
`jax.Array` objects out of our datasets, and how to use them to train JAX models.
<Tip>
`jax` and `jaxlib` are required to reproduce the code above, so please make sure you
install them as `pip install datasets[jax]`.
</Tip>
## Dataset format
By default, datasets return regular Python objects: integers, floats, strings, lists, etc., and
string and binary objects are unchanged, since JAX only supports numbers.
To get JAX arrays (numpy-like) instead, you can set the format of the dataset to `jax`:
```py
>>> from datasets import Dataset
>>> data = [[1, 2], [3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("jax")
>>> ds[0]
{'data': DeviceArray([1, 2], dtype=int32)}
>>> ds[:2]
{'data': DeviceArray([
[1, 2],
[3, 4]], dtype=int32)}
```
<Tip>
A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to JAX arrays.
</Tip>
Note that the exact same procedure applies to `DatasetDict` objects, so that
when setting the format of a `DatasetDict` to `jax`, all the `Dataset`s there
will be formatted as `jax`:
```py
>>> from datasets import DatasetDict
>>> data = {"train": {"data": [[1, 2], [3, 4]]}, "test": {"data": [[5, 6], [7, 8]]}}
>>> dds = DatasetDict.from_dict(data)
>>> dds = dds.with_format("jax")
>>> dds["train"][:2]
{'data': DeviceArray([
[1, 2],
[3, 4]], dtype=int32)}
```
Another thing you'll need to take into consideration is that the formatting is not applied
until you actually access the data. So if you want to get a JAX array out of a dataset,
you'll need to access the data first, otherwise the format will remain the same.
Finally, to load the data in the device of your choice, you can specify the `device` argument,
but note that `jaxlib.xla_extension.Device` is not supported as it's not serializable with either
`pickle` or `dill`, so you'll need to use its string identifier instead:
```py
>>> import jax
>>> from datasets import Dataset
>>> data = [[1, 2], [3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> device = str(jax.devices()[0]) # Not casting to `str` before passing it to `with_format` will raise a `ValueError`
>>> ds = ds.with_format("jax", device=device)
>>> ds[0]
{'data': DeviceArray([1, 2], dtype=int32)}
>>> ds[0]["data"].device()
TFRT_CPU_0
>>> assert ds[0]["data"].device() == jax.devices()[0]
True
```
Note that if the `device` argument is not provided to `with_format` then it will use the default
device which is `jax.devices()[0]`.
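In other words, the two calls below behave the same (a small illustrative sketch):
```py
>>> ds = ds.with_format("jax")                                 # arrays land on the default device
>>> ds = ds.with_format("jax", device=str(jax.devices()[0]))   # equivalent explicit form
```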
## N-dimensional arrays
If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists.
In particular, a JAX formatted dataset outputs a `DeviceArray` object, which is a numpy-like array, so it does not
need the [`Array`] feature type to be specified as opposed to PyTorch or TensorFlow formatters.
```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]], [[5, 6],[7, 8]]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("jax")
>>> ds[0]
{'data': DeviceArray([[1, 2],
[3, 4]], dtype=int32)}
```
## Other feature types
[`ClassLabel`] data is properly converted to arrays:
```py
>>> from datasets import Dataset, Features, ClassLabel
>>> labels = [0, 0, 1]
>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
>>> ds = Dataset.from_dict({"label": labels}, features=features)
>>> ds = ds.with_format("jax")
>>> ds[:3]
{'label': DeviceArray([0, 0, 1], dtype=int32)}
```
String and binary objects are unchanged, since JAX only supports numbers.
The [`Image`] and [`Audio`] feature types are also supported.
<Tip>
To use the [`Image`] feature type, you'll need to install the `vision` extra as
`pip install datasets[vision]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Image
>>> images = ["path/to/image.png"] * 10
>>> features = Features({"image": Image()})
>>> ds = Dataset.from_dict({"image": images}, features=features)
>>> ds = ds.with_format("jax")
>>> ds[0]["image"].shape
(512, 512, 3)
>>> ds[0]
{'image': DeviceArray([[[ 255, 255, 255],
[ 255, 255, 255],
...,
[ 255, 255, 255],
[ 255, 255, 255]]], dtype=uint8)}
>>> ds[:2]["image"].shape
(2, 512, 512, 3)
>>> ds[:2]
{'image': DeviceArray([[[[ 255, 255, 255],
[ 255, 255, 255],
...,
[ 255, 255, 255],
[ 255, 255, 255]]]], dtype=uint8)}
```
<Tip>
To use the [`Audio`] feature type, you'll need to install the `audio` extra as
`pip install datasets[audio]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio
>>> audio = ["path/to/audio.wav"] * 10
>>> features = Features({"audio": Audio()})
>>> ds = Dataset.from_dict({"audio": audio}, features=features)
>>> ds = ds.with_format("jax")
>>> ds[0]["audio"]["array"]
DeviceArray([-0.059021 , -0.03894043, -0.00735474, ..., 0.0133667 ,
0.01809692, 0.00268555], dtype=float32)
>>> ds[0]["audio"]["sampling_rate"]
DeviceArray(44100, dtype=int32, weak_type=True)
```
## Data loading
JAX doesn't have any built-in data loading capabilities, so you'll need to use a library such
as [PyTorch](https://pytorch.org/) to load your data using a `DataLoader` or [TensorFlow](https://www.tensorflow.org/)
using a `tf.data.Dataset`. Citing the [JAX documentation](https://jax.readthedocs.io/en/latest/notebooks/Neural_Network_and_Data_Loading.html#data-loading-with-pytorch) on this topic:
"JAX is laser-focused on program transformations and accelerator-backed NumPy, so we donβt
include data loading or munging in the JAX library. There are already a lot of great data loaders
out there, so letβs just use them instead of reinventing anything. Weβll grab PyTorchβs data loader,
and make a tiny shim to make it work with NumPy arrays.".
That's why JAX-formatting in `datasets` is so useful: it lets you use any model from the Hugging Face Hub with JAX without having to worry about the data loading part.
### Using `with_format('jax')`
The easiest way to get JAX arrays out of a dataset is to use the `with_format('jax')` method. Lets assume
that we want to train a neural network on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) available
at the HuggingFace Hub at https://huggingface.co/datasets/mnist.
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("mnist")
>>> ds = ds.with_format("jax")
>>> ds["train"][0]
{'image': DeviceArray([[ 0, 0, 0, ...],
[ 0, 0, 0, ...],
...,
[ 0, 0, 0, ...],
[ 0, 0, 0, ...]], dtype=uint8),
'label': DeviceArray(5, dtype=int32)}
```
Once the format is set we can feed the dataset to the JAX model in batches using the `Dataset.iter()`
method:
```py
>>> for epoch in range(epochs):
... for batch in ds["train"].iter(batch_size=32):
... x, y = batch["image"], batch["label"]
... ...
```
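As a rough sketch of what the body of that loop might contain (this example is illustrative and not part of the original guide; it assumes a plain linear classifier on the flattened MNIST images and a hand-rolled SGD step):
```py
>>> import jax
>>> import jax.numpy as jnp

>>> def loss_fn(params, x, y):
...     # Flatten the 28x28 images, project to 10 classes and compute cross-entropy
...     logits = jnp.dot(x.reshape(x.shape[0], -1) / 255.0, params["w"]) + params["b"]
...     log_probs = jax.nn.log_softmax(logits)
...     return -jnp.mean(jnp.sum(jax.nn.one_hot(y, 10) * log_probs, axis=-1))

>>> params = {"w": jnp.zeros((28 * 28, 10)), "b": jnp.zeros(10)}
>>> grad_fn = jax.jit(jax.grad(loss_fn))
>>> for batch in ds["train"].iter(batch_size=32):
...     x, y = batch["image"], batch["label"]
...     grads = grad_fn(params, x, y)
...     params = jax.tree_util.tree_map(lambda p, g: p - 0.01 * g, params, grads)
```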
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/dataset_card.mdx | # Create a dataset card
Each dataset should have a dataset card to promote responsible usage and inform users of any potential biases within the dataset.
This idea was inspired by the Model Cards proposed by [Mitchell, 2018](https://arxiv.org/abs/1810.03993).
Dataset cards help users understand a dataset's contents, the context for using the dataset, how it was created, and any other considerations a user should be aware of.
Creating a dataset card is easy and can be done in just a few steps:
1. Go to your dataset repository on the [Hub](https://hf.co/new-dataset) and click on **Create Dataset Card** to create a new `README.md` file in your repository.
2. Use the **Metadata UI** to select the tags that describe your dataset. You can add a license, language, pretty_name, the task_categories, size_categories, and any other tags that you think are relevant. These tags help users discover and find your dataset on the Hub.
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/datasets-metadata-ui.png"/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/datasets-metadata-ui-dark.png"/>
</div>
<Tip>
For a complete, but not required, set of tag options you can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1). This'll have a few more tag options like `multilinguality` and `language_creators` which are useful but not absolutely necessary.
</Tip>
3. Click on the **Import dataset card template** link to automatically create a template with all the relevant fields to complete. Fill out the template sections to the best of your ability. Take a look at the [Dataset Card Creation Guide](https://github.com/huggingface/datasets/blob/main/templates/README_guide.md) for more detailed information about what to include in each section of the card. For fields you are unable to complete, you can write **[More Information Needed]**.
4. Once you're done, commit the changes to the `README.md` file and you'll see the completed dataset card on your repository.
YAML also allows you to customize the way your dataset is loaded by [defining splits and/or configurations](./repository_structure#define-your-splits-and-subsets-in-yaml) without the need to write any code.
Feel free to take a look at the [SNLI](https://huggingface.co/datasets/snli), [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail), and [AllocinΓ©](https://huggingface.co/datasets/allocine) dataset cards as examples to help you get started.
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/process.mdx | # Process
🤗 Datasets provides many tools for modifying the structure and content of a dataset. These tools are important for tidying up a dataset, creating additional columns, converting between features and formats, and much more.
This guide will show you how to:
- Reorder rows and split the dataset.
- Rename and remove columns, and other common column operations.
- Apply processing functions to each example in a dataset.
- Concatenate datasets.
- Apply a custom formatting transform.
- Save and export processed datasets.
For more details specific to processing other dataset modalities, take a look at the <a class="underline decoration-pink-400 decoration-2 font-semibold" href="./audio_process">process audio dataset guide</a>, the <a class="underline decoration-yellow-400 decoration-2 font-semibold" href="./image_process">process image dataset guide</a>, or the <a class="underline decoration-green-400 decoration-2 font-semibold" href="./nlp_process">process text dataset guide</a>.
The examples in this guide use the MRPC dataset, but feel free to load any dataset of your choice and follow along!
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("glue", "mrpc", split="train")
```
<Tip warning={true}>
All processing methods in this guide return a new [`Dataset`] object. Modification is not done in-place. Be careful about overriding your previous dataset!
</Tip>
## Sort, shuffle, select, split, and shard
There are several functions for rearranging the structure of a dataset.
These functions are useful for selecting only the rows you want, creating train and test splits, and sharding very large datasets into smaller chunks.
### Sort
Use [`~Dataset.sort`] to sort column values according to their numerical values. The provided column must be NumPy compatible.
```py
>>> dataset["label"][:10]
[1, 0, 1, 0, 1, 1, 0, 1, 0, 0]
>>> sorted_dataset = dataset.sort("label")
>>> sorted_dataset["label"][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> sorted_dataset["label"][-10:]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
Under the hood, this creates a list of indices that is sorted according to values of the column.
This indices mapping is then used to access the right rows in the underlying Arrow table.
### Shuffle
The [`~Dataset.shuffle`] function randomly rearranges the column values. You can specify the `generator` parameter in this function to use a different `numpy.random.Generator` if you want more control over the algorithm used to shuffle the dataset.
```py
>>> shuffled_dataset = sorted_dataset.shuffle(seed=42)
>>> shuffled_dataset["label"][:10]
[1, 1, 1, 0, 1, 1, 1, 1, 1, 0]
```
Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
However, as soon as your [`Dataset`] has an indices mapping, iteration can become up to 10x slower.
This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
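For example, a minimal sketch of that approach:
```py
>>> contiguous_dataset = shuffled_dataset.flatten_indices()  # rewrites the data contiguously and drops the indices mapping
```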
Alternatively, you can switch to an [`IterableDataset`] and leverage its fast approximate shuffling [`IterableDataset.shuffle`]:
```py
>>> iterable_dataset = dataset.to_iterable_dataset(num_shards=128)
>>> shuffled_iterable_dataset = iterable_dataset.shuffle(seed=42, buffer_size=1000)
```
### Select and Filter
There are two options for filtering rows in a dataset: [`~Dataset.select`] and [`~Dataset.filter`].
- [`~Dataset.select`] returns rows according to a list of indices:
```py
>>> small_dataset = dataset.select([0, 10, 20, 30, 40, 50])
>>> len(small_dataset)
6
```
- [`~Dataset.filter`] returns rows that match a specified condition:
```py
>>> start_with_ar = dataset.filter(lambda example: example["sentence1"].startswith("Ar"))
>>> len(start_with_ar)
6
>>> start_with_ar["sentence1"]
['Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .',
'Arison said Mann may have been one of the pioneers of the world music movement and he had a deep love of Brazilian music .',
'Arts helped coach the youth on an eighth-grade football team at Lombardi Middle School in Green Bay .',
'Around 9 : 00 a.m. EDT ( 1300 GMT ) , the euro was at $ 1.1566 against the dollar , up 0.07 percent on the day .',
"Arguing that the case was an isolated example , Canada has threatened a trade backlash if Tokyo 's ban is not justified on scientific grounds .",
'Artists are worried the plan would harm those who need help most - performers who have a difficult time lining up shows .'
]
```
[`~Dataset.filter`] can also filter by indices if you set `with_indices=True`:
```py
>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
>>> len(even_dataset)
1834
>>> len(dataset) / 2
1834.0
```
Unless the list of indices to keep is contiguous, those methods also create an indices mapping under the hood.
### Split
The [`~Dataset.train_test_split`] function creates train and test splits if your dataset doesn't already have them. This allows you to adjust the relative proportions or an absolute number of samples in each split. In the example below, use the `test_size` parameter to create a test split that is 10% of the original dataset:
```py
>>> dataset.train_test_split(test_size=0.1)
{'train': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 3301),
'test': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 367)}
>>> 0.1 * len(dataset)
366.8
```
The splits are shuffled by default, but you can set `shuffle=False` to prevent shuffling.
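For instance, a deterministic split that keeps the original row order:
```py
>>> dataset.train_test_split(test_size=0.1, shuffle=False)
```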
### Shard
🤗 Datasets supports sharding to divide a very large dataset into a predefined number of chunks. Specify the `num_shards` parameter in [`~Dataset.shard`] to determine the number of shards to split the dataset into. You'll also need to provide the shard you want to return with the `index` parameter.
For example, the [imdb](https://huggingface.co/datasets/imdb) dataset has 25000 examples:
```py
>>> from datasets import load_dataset
>>> datasets = load_dataset("imdb", split="train")
>>> print(dataset)
Dataset({
features: ['text', 'label'],
num_rows: 25000
})
```
After sharding the dataset into four chunks, the first shard will only have 6250 examples:
```py
>>> dataset.shard(num_shards=4, index=0)
Dataset({
features: ['text', 'label'],
num_rows: 6250
})
>>> print(25000/4)
6250.0
```
## Rename, remove, cast, and flatten
The following functions allow you to modify the columns of a dataset. These functions are useful for renaming or removing columns, changing columns to a new set of features, and flattening nested column structures.
### Rename
Use [`~Dataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place.
Provide [`~Dataset.rename_column`] with the name of the original column, and the new column name:
```py
>>> dataset
Dataset({
features: ['sentence1', 'sentence2', 'label', 'idx'],
num_rows: 3668
})
>>> dataset = dataset.rename_column("sentence1", "sentenceA")
>>> dataset = dataset.rename_column("sentence2", "sentenceB")
>>> dataset
Dataset({
features: ['sentenceA', 'sentenceB', 'label', 'idx'],
num_rows: 3668
})
```
### Remove
When you need to remove one or more columns, provide the column name to remove to the [`~Dataset.remove_columns`] function. Remove more than one column by providing a list of column names:
```py
>>> dataset = dataset.remove_columns("label")
>>> dataset
Dataset({
features: ['sentence1', 'sentence2', 'idx'],
num_rows: 3668
})
>>> dataset = dataset.remove_columns(["sentence1", "sentence2"])
>>> dataset
Dataset({
features: ['idx'],
num_rows: 3668
})
```
### Cast
The [`~Dataset.cast`] function transforms the feature type of one or more columns. This function accepts your new [`Features`] as its argument. The example below demonstrates how to change the [`ClassLabel`] and [`Value`] features:
```py
>>> dataset.features
{'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None),
'idx': Value(dtype='int32', id=None)}
>>> from datasets import ClassLabel, Value
>>> new_features = dataset.features.copy()
>>> new_features["label"] = ClassLabel(names=["negative", "positive"])
>>> new_features["idx"] = Value("int64")
>>> dataset = dataset.cast(new_features)
>>> dataset.features
{'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
'label': ClassLabel(num_classes=2, names=['negative', 'positive'], names_file=None, id=None),
'idx': Value(dtype='int64', id=None)}
```
<Tip>
Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value("int32")` to `Value("bool")` if the original column only contains ones and zeros.
</Tip>
Use the [`~Dataset.cast_column`] function to change the feature type of a single column. Pass the column name and its new feature type as arguments:
```py
>>> dataset.features
{'audio': Audio(sampling_rate=44100, mono=True, id=None)}
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
>>> dataset.features
{'audio': Audio(sampling_rate=16000, mono=True, id=None)}
```
### Flatten
Sometimes a column can be a nested structure of several types. Take a look at the nested structure below from the SQuAD dataset:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("squad", split="train")
>>> dataset.features
{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
```
The `answers` field contains two subfields: `text` and `answer_start`. Use the [`~Dataset.flatten`] function to extract the subfields into their own separate columns:
```py
>>> flat_dataset = dataset.flatten()
>>> flat_dataset
Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 87599
})
```
Notice how the subfields are now their own independent columns: `answers.text` and `answers.answer_start`.
## Map
Some of the more powerful applications of 🤗 Datasets come from using the [`~Dataset.map`] function. The primary purpose of [`~Dataset.map`] is to speed up processing functions. It allows you to apply a processing function to each example in a dataset, independently or in batches. This function can even create new rows and columns.
In the following example, prefix each `sentence1` value in the dataset with `'My sentence: '`.
Start by creating a function that adds `'My sentence: '` to the beginning of each sentence. The function needs to accept and output a `dict`:
```py
>>> def add_prefix(example):
... example["sentence1"] = 'My sentence: ' + example["sentence1"]
... return example
```
Now use [`~Dataset.map`] to apply the `add_prefix` function to the entire dataset:
```py
>>> updated_dataset = small_dataset.map(add_prefix)
>>> updated_dataset["sentence1"][:5]
['My sentence: Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
"My sentence: Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
'My sentence: They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .',
'My sentence: Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .',
]
```
Let's take a look at another example, except this time, you'll remove a column with [`~Dataset.map`]. When you remove a column, it is only removed after the example has been provided to the mapped function. This allows the mapped function to use the content of the columns before they are removed.
Specify the column to remove with the `remove_columns` parameter in [`~Dataset.map`]:
```py
>>> updated_dataset = dataset.map(lambda example: {"new_sentence": example["sentence1"]}, remove_columns=["sentence1"])
>>> updated_dataset.column_names
['sentence2', 'label', 'idx', 'new_sentence']
```
<Tip>
🤗 Datasets also has a [`~Dataset.remove_columns`] function which is faster because it doesn't copy the data of the remaining columns.
</Tip>
You can also use [`~Dataset.map`] with indices if you set `with_indices=True`. The example below adds the index to the beginning of each sentence:
```py
>>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, with_indices=True)
>>> updated_dataset["sentence2"][:5]
['0: Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
"1: Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .",
"2: On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .",
'3: Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .',
'4: PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .'
]
```
The [`~Dataset.map`] also works with the rank of the process if you set `with_rank=True`. This is analogous to the `with_indices` parameter. The `with_rank` parameter in the mapped function goes after the `index` one if it is already present.
```py
>>> from multiprocess import set_start_method
>>> import torch
>>> import os
>>>
>>> set_start_method("spawn")
>>>
>>> def gpu_computation(example, rank):
>>> os.environ["CUDA_VISIBLE_DEVICES"] = str(rank % torch.cuda.device_count())
>>> # Your big GPU call goes here
>>> return example
>>>
>>> updated_dataset = dataset.map(gpu_computation, with_rank=True)
```
The main use-case for rank is to parallelize computation across several GPUs. This requires setting `multiprocess.set_start_method("spawn")`. If you don't you'll receive the following CUDA error:
```bash
RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the 'spawn' start method.
```
### Multiprocessing
Multiprocessing significantly speeds up processing by parallelizing processes on the CPU. Set the `num_proc` parameter in [`~Dataset.map`] to set the number of processes to use:
```py
>>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, num_proc=4)
```
### Batch processing
The [`~Dataset.map`] function supports working with batches of examples. Operate on batches by setting `batched=True`. The default batch size is 1000, but you can adjust it with the `batch_size` parameter. Batch processing enables interesting applications such as splitting long sentences into shorter chunks and data augmentation.
#### Split long examples
When examples are too long, you may want to split them into several smaller chunks. Begin by creating a function that:
1. Splits the `sentence1` field into chunks of 50 characters.
2. Stacks all the chunks together to create the new dataset.
```py
>>> def chunk_examples(examples):
... chunks = []
... for sentence in examples["sentence1"]:
... chunks += [sentence[i:i + 50] for i in range(0, len(sentence), 50)]
... return {"chunks": chunks}
```
Apply the function with [`~Dataset.map`]:
```py
>>> chunked_dataset = dataset.map(chunk_examples, batched=True, remove_columns=dataset.column_names)
>>> chunked_dataset[:10]
{'chunks': ['Amrozi accused his brother , whom he called " the ',
'witness " , of deliberately distorting his evidenc',
'e .',
"Yucaipa owned Dominick 's before selling the chain",
' to Safeway in 1998 for $ 2.5 billion .',
'They had published an advertisement on the Interne',
't on June 10 , offering the cargo for sale , he ad',
'ded .',
'Around 0335 GMT , Tab shares were up 19 cents , or',
' 4.4 % , at A $ 4.56 , having earlier set a record']}
```
Notice how the sentences are split into shorter chunks now, and there are more rows in the dataset.
```py
>>> dataset
Dataset({
features: ['sentence1', 'sentence2', 'label', 'idx'],
num_rows: 3668
})
>>> chunked_dataset
Dataset(schema: {'chunks': 'string'}, num_rows: 10470)
```
#### Data augmentation
The [`~Dataset.map`] function can also be used for data augmentation. The following example generates additional words for a masked token in a sentence.
Load and use the [RoBERTa](https://huggingface.co/roberta-base) model in π€ Transformers' [FillMaskPipeline](https://huggingface.co/transformers/main_classes/pipelines#transformers.FillMaskPipeline):
```py
>>> from random import randint
>>> from transformers import pipeline
>>> fillmask = pipeline("fill-mask", model="roberta-base")
>>> mask_token = fillmask.tokenizer.mask_token
>>> smaller_dataset = dataset.filter(lambda e, i: i<100, with_indices=True)
```
Create a function to randomly select a word to mask in the sentence. The function should also return the original sentence and the top three replacements generated by RoBERTa.
```py
>>> def augment_data(examples):
... outputs = []
... for sentence in examples["sentence1"]:
... words = sentence.split(' ')
... K = randint(1, len(words)-1)
... masked_sentence = " ".join(words[:K] + [mask_token] + words[K+1:])
... predictions = fillmask(masked_sentence)
... augmented_sequences = [predictions[i]["sequence"] for i in range(3)]
... outputs += [sentence] + augmented_sequences
...
... return {"data": outputs}
```
Use [`~Dataset.map`] to apply the function over the whole dataset:
```py
>>> augmented_dataset = smaller_dataset.map(augment_data, batched=True, remove_columns=dataset.column_names, batch_size=8)
>>> augmented_dataset[:9]["data"]
['Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
'Amrozi accused his brother, whom he called " the witness ", of deliberately withholding his evidence.',
'Amrozi accused his brother, whom he called " the witness ", of deliberately suppressing his evidence.',
'Amrozi accused his brother, whom he called " the witness ", of deliberately destroying his evidence.',
"Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
'Yucaipa owned Dominick Stores before selling the chain to Safeway in 1998 for $ 2.5 billion.',
"Yucaipa owned Dominick's before selling the chain to Safeway in 1998 for $ 2.5 billion.",
'Yucaipa owned Dominick Pizza before selling the chain to Safeway in 1998 for $ 2.5 billion.'
]
```
For each original sentence, RoBERTa augmented a random word with three alternatives. The original word `distorting` is supplemented by `withholding`, `suppressing`, and `destroying`.
### Process multiple splits
Many datasets have splits that can be processed simultaneously with [`DatasetDict.map`]. For example, tokenize the `sentence1` field in the train, validation and test splits by:
```py
>>> from datasets import load_dataset
# load all the splits
>>> dataset = load_dataset('glue', 'mrpc')
>>> encoded_dataset = dataset.map(lambda examples: tokenizer(examples["sentence1"]), batched=True)
>>> encoded_dataset["train"][0]
{'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
'label': 1,
'idx': 0,
'input_ids': [ 101, 7277, 2180, 5303, 4806, 1117, 1711, 117, 2292, 1119, 1270, 107, 1103, 7737, 107, 117, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
}
```
### Distributed usage
When you use [`~Dataset.map`] in a distributed setting, you should also use [torch.distributed.barrier](https://pytorch.org/docs/stable/distributed?highlight=barrier#torch.distributed.barrier). This ensures the main process performs the mapping, while the other processes load the results, thereby avoiding duplicate work.
The following example shows how you can use `torch.distributed.barrier` to synchronize the processes:
```py
>>> from datasets import Dataset
>>> import torch.distributed
>>> dataset1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> if training_args.local_rank > 0:  # local_rank is provided by e.g. transformers.TrainingArguments
... print("Waiting for main process to perform the mapping")
... torch.distributed.barrier()
>>> dataset2 = dataset1.map(lambda x: {"a": x["a"] + 1})
>>> if training_args.local_rank == 0:
... print("Loading results from main process")
... torch.distributed.barrier()
```
## Concatenate
Separate datasets can be concatenated if they share the same column types. Concatenate datasets with [`concatenate_datasets`]:
```py
>>> from datasets import concatenate_datasets, load_dataset
>>> bookcorpus = load_dataset("bookcorpus", split="train")
>>> wiki = load_dataset("wikipedia", "20220301.en", split="train")
>>> wiki = wiki.remove_columns([col for col in wiki.column_names if col != "text"]) # only keep the 'text' column
>>> assert bookcorpus.features.type == wiki.features.type
>>> bert_dataset = concatenate_datasets([bookcorpus, wiki])
```
You can also concatenate two datasets horizontally by setting `axis=1` as long as the datasets have the same number of rows:
```py
>>> from datasets import Dataset
>>> bookcorpus_ids = Dataset.from_dict({"ids": list(range(len(bookcorpus)))})
>>> bookcorpus_with_ids = concatenate_datasets([bookcorpus, bookcorpus_ids], axis=1)
```
### Interleave
You can also mix several datasets together by taking alternating examples from each one to create a new dataset. This is known as *interleaving*, which is enabled by the [`interleave_datasets`] function. Both [`interleave_datasets`] and [`concatenate_datasets`] work with regular [`Dataset`] and [`IterableDataset`] objects.
Refer to the [Stream](./stream#interleave) guide for an example of how to interleave [`IterableDataset`] objects.
You can define sampling probabilities for each of the original datasets to specify how to interleave the datasets.
In this case, the new dataset is constructed by getting examples one by one from a random dataset until one of the datasets runs out of samples.
```py
>>> from datasets import Dataset, interleave_datasets
>>> seed = 42
>>> probabilities = [0.3, 0.5, 0.2]
>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
>>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
>>> dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)
>>> dataset["a"]
[10, 11, 20, 12, 0, 21, 13]
```
You can also specify the `stopping_strategy`. The default strategy, `first_exhausted`, is a subsampling strategy, i.e. the dataset construction stops as soon as one of the datasets runs out of samples.
You can specify `stopping_strategy=all_exhausted` to execute an oversampling strategy. In this case, the dataset construction stops as soon as every sample in every dataset has been added at least once. In practice, this means that once a dataset is exhausted, iteration loops back to its beginning until the stop criterion is reached.
Note that if no sampling probabilities are specified, the new dataset will have `max_length_datasets * nb_dataset` samples.
```py
>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
>>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
>>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
>>> dataset["a"]
[0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 20]
```
## Format
The [`~Dataset.set_format`] function changes the format of a column to be compatible with some common data formats. Specify the output you'd like in the `type` parameter and the columns you want to format. Formatting is applied on-the-fly.
For example, create PyTorch tensors by setting `type="torch"`:
```py
>>> import torch
>>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "label"])
```
The [`~Dataset.with_format`] function also changes the format of a column, except it returns a new [`Dataset`] object:
```py
>>> dataset = dataset.with_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "label"])
```
<Tip>
π€ Datasets also provides support for other common data formats such as NumPy, Pandas, and JAX. Check out the [Using Datasets with TensorFlow](https://huggingface.co/docs/datasets/master/en/use_with_tensorflow#using-totfdataset) guide for more details on how to efficiently create a TensorFlow dataset.
</Tip>
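For instance, the same mechanism can give you NumPy arrays or a Pandas DataFrame view of the data. A minimal sketch (variable names are just for illustration):
```py
>>> ds_numpy = dataset.with_format(type="numpy", columns=["input_ids", "token_type_ids", "attention_mask", "label"])
>>> df = dataset.with_format(type="pandas")[:5]  # the first five rows as a pandas.DataFrame
```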
If you need to reset the dataset to its original format, use the [`~Dataset.reset_format`] function:
```py
>>> dataset.format
{'type': 'torch', 'format_kwargs': {}, 'columns': ['label'], 'output_all_columns': False}
>>> dataset.reset_format()
>>> dataset.format
{'type': 'python', 'format_kwargs': {}, 'columns': ['idx', 'label', 'sentence1', 'sentence2'], 'output_all_columns': False}
```
### Format transform
The [`~Dataset.set_transform`] function applies a custom formatting transform on-the-fly. This function replaces any previously specified format. For example, you can use this function to tokenize and pad tokens on-the-fly. Tokenization is only applied when examples are accessed:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> def encode(batch):
... return tokenizer(batch["sentence1"], padding="longest", truncation=True, max_length=512, return_tensors="pt")
>>> dataset.set_transform(encode)
>>> dataset.format
{'type': 'custom', 'format_kwargs': {'transform': <function __main__.encode(batch)>}, 'columns': ['idx', 'label', 'sentence1', 'sentence2'], 'output_all_columns': False}
```
You can also use the [`~Dataset.set_transform`] function to decode formats not supported by [`Features`]. For example, the [`Audio`] feature uses [`soundfile`](https://python-soundfile.readthedocs.io/en/0.11.0/) - a fast and simple library to install - but it does not provide support for less common audio formats. Here is where you can use [`~Dataset.set_transform`] to apply a custom decoding transform on the fly. You're free to use any library you like to decode the audio files.
The example below uses the [`pydub`](http://pydub.com/) package to open an audio format not supported by `soundfile`:
```py
>>> import numpy as np
>>> from pydub import AudioSegment
>>> audio_dataset_amr = Dataset.from_dict({"audio": ["audio_samples/audio.amr"]})
>>> def decode_audio_with_pydub(batch, sampling_rate=16_000):
... def pydub_decode_file(audio_path):
... sound = AudioSegment.from_file(audio_path)
... if sound.frame_rate != sampling_rate:
... sound = sound.set_frame_rate(sampling_rate)
... channel_sounds = sound.split_to_mono()
... samples = [s.get_array_of_samples() for s in channel_sounds]
... fp_arr = np.array(samples).T.astype(np.float32)
... fp_arr /= np.iinfo(samples[0].typecode).max
... return fp_arr
...
... batch["audio"] = [pydub_decode_file(audio_path) for audio_path in batch["audio"]]
... return batch
>>> audio_dataset_amr.set_transform(decode_audio_with_pydub)
```
## Save
Once you are done processing your dataset, you can save and reuse it later with [`~Dataset.save_to_disk`].
Save your dataset by providing the path to the directory you wish to save it to:
```py
>>> encoded_dataset.save_to_disk("path/of/my/dataset/directory")
```
Use the [`load_from_disk`] function to reload the dataset:
```py
>>> from datasets import load_from_disk
>>> reloaded_dataset = load_from_disk("path/of/my/dataset/directory")
```
<Tip>
Want to save your dataset to a cloud storage provider? Read our [Cloud Storage](./filesystems) guide to learn how to save your dataset to AWS or Google Cloud Storage.
</Tip>
## Export
π€ Datasets supports exporting as well so you can work with your dataset in other applications. The following table shows currently supported file formats you can export to:
| File type | Export method |
|-------------------------|----------------------------------------------------------------|
| CSV | [`Dataset.to_csv`] |
| JSON | [`Dataset.to_json`] |
| Parquet | [`Dataset.to_parquet`] |
| SQL | [`Dataset.to_sql`] |
| In-memory Python object | [`Dataset.to_pandas`] or [`Dataset.to_dict`] |
For example, export your dataset to a CSV file like this:
```py
>>> encoded_dataset.to_csv("path/of/my/dataset.csv")
```
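The other export methods work the same way. For example, to export to JSON Lines or Parquet (output paths are placeholders):
```py
>>> encoded_dataset.to_json("path/of/my/dataset.jsonl")
>>> encoded_dataset.to_parquet("path/of/my/dataset.parquet")
```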
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/_config.py | # docstyle-ignore
INSTALL_CONTENT = """
# Datasets installation
! pip install datasets transformers
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/datasets.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
default_branch_name = "main"
version_prefix = ""
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/audio_process.mdx | # Process audio data
This guide shows specific methods for processing audio datasets. Learn how to:
- Resample the sampling rate.
- Use [`~Dataset.map`] with audio datasets.
For a guide on how to process any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./process">general process guide</a>.
## Cast
The [`~Dataset.cast_column`] function is used to cast a column to another feature to be decoded. When you use this function with the [`Audio`] feature, you can resample the sampling rate:
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
```
Audio files are decoded and resampled on-the-fly, so the next time you access an example, the audio file is resampled to 16kHz:
```py
>>> dataset[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/resample.gif"/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/resample-dark.gif"/>
</div>
## Map
The [`~Dataset.map`] function helps preprocess your entire dataset at once. Depending on the type of model you're working with, you'll need to either load a [feature extractor](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoFeatureExtractor) or a [processor](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoProcessor).
- For pretrained speech recognition models, load a feature extractor and tokenizer and combine them in a `processor`:
```py
>>> from transformers import AutoFeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Processor
>>> model_checkpoint = "facebook/wav2vec2-large-xlsr-53"
# after defining a vocab.json file you can instantiate a tokenizer object with the wav2vec2-specific classes:
>>> tokenizer = Wav2Vec2CTCTokenizer("./vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained(model_checkpoint)
>>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
```
- For fine-tuned speech recognition models, you only need to load a `processor`:
```py
>>> from transformers import AutoProcessor
>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
```
When you use [`~Dataset.map`] with your preprocessing function, include the `audio` column to ensure you're actually resampling the audio data:
```py
>>> def prepare_dataset(batch):
... audio = batch["audio"]
... batch["input_values"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
... batch["input_length"] = len(batch["input_values"])
... with processor.as_target_processor():
... batch["labels"] = processor(batch["sentence"]).input_ids
... return batch
>>> dataset = dataset.map(prepare_dataset, remove_columns=dataset.column_names)
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/loading.mdx | # Load
Your data can be stored in various places: on your local machine's disk, in a GitHub repository, or in in-memory data structures like Python dictionaries and Pandas DataFrames. Wherever a dataset is stored, π€ Datasets can help you load it.
This guide will show you how to load a dataset from:
- The Hub without a dataset loading script
- Local loading script
- Local files
- In-memory data
- Offline
- A specific slice of a split
For more details specific to loading other dataset modalities, take a look at the <a class="underline decoration-pink-400 decoration-2 font-semibold" href="./audio_load">load audio dataset guide</a>, the <a class="underline decoration-yellow-400 decoration-2 font-semibold" href="./image_load">load image dataset guide</a>, or the <a class="underline decoration-green-400 decoration-2 font-semibold" href="./nlp_load">load text dataset guide</a>.
<a id='load-from-the-hub'></a>
## Hugging Face Hub
Datasets are loaded from a dataset loading script that downloads and generates the dataset. However, you can also load a dataset from any dataset repository on the Hub without a loading script! Begin by [creating a dataset repository](share#create-the-repository) and upload your data files. Now you can use the [`load_dataset`] function to load the dataset.
For example, try loading the files from this [demo repository](https://huggingface.co/datasets/lhoestq/demo1) by providing the repository namespace and dataset name. This dataset repository contains CSV files, and the code below loads the dataset from the CSV files:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("lhoestq/demo1")
```
Some datasets may have more than one version based on Git tags, branches, or commits. Use the `revision` parameter to specify the dataset version you want to load:
```py
>>> dataset = load_dataset(
... "lhoestq/custom_squad",
... revision="main" # tag name, or branch name, or commit hash
... )
```
<Tip>
Refer to the [Upload a dataset to the Hub](./upload_dataset) tutorial for more details on how to create a dataset repository on the Hub, and how to upload your data files.
</Tip>
A dataset without a loading script by default loads all the data into the `train` split. Use the `data_files` parameter to map data files to splits like `train`, `validation` and `test`:
```py
>>> data_files = {"train": "train.csv", "test": "test.csv"}
>>> dataset = load_dataset("namespace/your_dataset_name", data_files=data_files)
```
<Tip warning={true}>
If you don't specify which data files to use, [`load_dataset`] will return all the data files. This can take a long time if you load a large dataset like C4, which is approximately 13TB of data.
</Tip>
You can also load a specific subset of the files with the `data_files` or `data_dir` parameter. These parameters can accept a relative path which resolves to the base path corresponding to where the dataset is loaded from.
```py
>>> from datasets import load_dataset
# load files that match the grep pattern
>>> c4_subset = load_dataset("allenai/c4", data_files="en/c4-train.0000*-of-01024.json.gz")
# load dataset from the en directory on the Hub
>>> c4_subset = load_dataset("allenai/c4", data_dir="en")
```
The `split` parameter can also map a data file to a specific split:
```py
>>> data_files = {"validation": "en/c4-validation.*.json.gz"}
>>> c4_validation = load_dataset("allenai/c4", data_files=data_files, split="validation")
```
## Local loading script
You may have a π€ Datasets loading script locally on your computer. In this case, load the dataset by passing one of the following paths to [`load_dataset`]:
- The local path to the loading script file.
- The local path to the directory containing the loading script file (only if the script file has the same name as the directory).
```py
>>> dataset = load_dataset("path/to/local/loading_script/loading_script.py", split="train")
>>> dataset = load_dataset("path/to/local/loading_script", split="train") # equivalent because the file has the same name as the directory
```
### Edit loading script
You can also edit a loading script from the Hub to add your own modifications. Download the dataset repository locally so any data files referenced by a relative path in the loading script can be loaded:
```bash
git clone https://huggingface.co/datasets/eli5
```
Make your edits to the loading script and then load it by passing its local path to [`~datasets.load_dataset`]:
```py
>>> from datasets import load_dataset
>>> eli5 = load_dataset("path/to/local/eli5")
```
## Local and remote files
Datasets can be loaded from local files stored on your computer and from remote files. The datasets are most likely stored as a `csv`, `json`, `txt` or `parquet` file. The [`load_dataset`] function can load each of these file types.
### CSV
π€ Datasets can read a dataset made up of one or several CSV files (in this case, pass your CSV files as a list):
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("csv", data_files="my_file.csv")
```
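For example, to read several CSV files at once, or to map them to splits, pass a list or a dict (the file names below are placeholders):
```py
>>> dataset = load_dataset("csv", data_files=["my_file_1.csv", "my_file_2.csv"])
>>> dataset = load_dataset("csv", data_files={"train": "train.csv", "test": "test.csv"})
```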
<Tip>
For more details, check out the [how to load tabular datasets from CSV files](tabular_load#csv-files) guide.
</Tip>
### JSON
JSON files are loaded directly with [`load_dataset`] as shown below:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("json", data_files="my_file.json")
```
JSON files have diverse formats, but the most efficient format is JSON Lines, where each line is a separate JSON object representing an individual row of data. For example:
```json
{"a": 1, "b": 2.0, "c": "foo", "d": false}
{"a": 4, "b": -5.5, "c": null, "d": true}
```
Another JSON format you may encounter is a nested field, in which case you'll need to specify the `field` argument as shown in the following:
```py
{"version": "0.1.0",
"data": [{"a": 1, "b": 2.0, "c": "foo", "d": false},
{"a": 4, "b": -5.5, "c": null, "d": true}]
}
>>> from datasets import load_dataset
>>> dataset = load_dataset("json", data_files="my_file.json", field="data")
```
To load remote JSON files via HTTP, pass the URLs instead:
```py
>>> base_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
>>> dataset = load_dataset("json", data_files={"train": base_url + "train-v1.1.json", "validation": base_url + "dev-v1.1.json"}, field="data")
```
While these are the most common JSON formats, you'll see other datasets that are formatted differently. π€ Datasets recognizes these other formats and will fall back to the Python JSON loading methods to handle them.
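For instance, a file containing a single top-level JSON array of objects is typically handled by this fallback and can be loaded the same way:
```json
[
    {"a": 1, "b": 2.0, "c": "foo", "d": false},
    {"a": 4, "b": -5.5, "c": null, "d": true}
]
```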
### Parquet
Parquet files are stored in a columnar format, unlike row-based files like CSV. Large datasets may be stored in a Parquet file because it is more efficient and faster at returning queries.
To load a Parquet file:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("parquet", data_files={'train': 'train.parquet', 'test': 'test.parquet'})
```
To load remote Parquet files via HTTP, pass the URLs instead:
```py
>>> base_url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wikipedia/20200501.en/1.0.0/"
>>> data_files = {"train": base_url + "wikipedia-train.parquet"}
>>> wiki = load_dataset("parquet", data_files=data_files, split="train")
```
### Arrow
Arrow files are stored in an in-memory columnar format, unlike row-based formats like CSV and compressed columnar formats like Parquet.
To load an Arrow file:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("arrow", data_files={'train': 'train.arrow', 'test': 'test.arrow'})
```
To load remote Arrow files via HTTP, pass the URLs instead:
```py
>>> base_url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wikipedia/20200501.en/1.0.0/"
>>> data_files = {"train": base_url + "wikipedia-train.arrow"}
>>> wiki = load_dataset("arrow", data_files=data_files, split="train")
```
Arrow is the file format used by π€ Datasets under the hood, therefore you can load a local Arrow file using [`Dataset.from_file`] directly:
```py
>>> from datasets import Dataset
>>> dataset = Dataset.from_file("data.arrow")
```
Unlike [`load_dataset`], [`Dataset.from_file`] memory maps the Arrow file without preparing the dataset in the cache, saving you disk space.
The cache directory to store intermediate processing results will be the Arrow file directory in that case.
For now only the Arrow streaming format is supported. The Arrow IPC file format (also known as Feather V2) is not supported.
### SQL
Read database contents with [`~datasets.Dataset.from_sql`] by specifying the URI to connect to your database. You can read both table names and queries:
```py
>>> from datasets import Dataset
# load entire table
>>> dataset = Dataset.from_sql("data_table_name", con="sqlite:///sqlite_file.db")
# load from query
>>> dataset = Dataset.from_sql("SELECT text FROM table WHERE length(text) > 100 LIMIT 10", con="sqlite:///sqlite_file.db")
```
<Tip>
For more details, check out the [how to load tabular datasets from SQL databases](tabular_load#databases) guide.
</Tip>
## Multiprocessing
When a dataset is made of several files (that we call "shards"), it is possible to significantly speed up the dataset downloading and preparation step.
You can choose how many processes you'd like to use to prepare a dataset in parallel using `num_proc`.
In this case, each process is given a subset of shards to prepare:
```python
from datasets import load_dataset
oscar_afrikaans = load_dataset("oscar-corpus/OSCAR-2201", "af", num_proc=8)
imagenet = load_dataset("imagenet-1k", num_proc=8)
ml_librispeech_spanish = load_dataset("facebook/multilingual_librispeech", "spanish", num_proc=8)
```
## In-memory data
π€ Datasets will also allow you to create a [`Dataset`] directly from in-memory data structures like Python dictionaries and Pandas DataFrames.
### Python dictionary
Load Python dictionaries with [`~Dataset.from_dict`]:
```py
>>> from datasets import Dataset
>>> my_dict = {"a": [1, 2, 3]}
>>> dataset = Dataset.from_dict(my_dict)
```
### Python list of dictionaries
Load a list of Python dictionaries with [`~Dataset.from_list`]:
```py
>>> from datasets import Dataset
>>> my_list = [{"a": 1}, {"a": 2}, {"a": 3}]
>>> dataset = Dataset.from_list(my_list)
```
### Python generator
Create a dataset from a Python generator with [`~Dataset.from_generator`]:
```py
>>> from datasets import Dataset
>>> def my_gen():
... for i in range(1, 4):
... yield {"a": i}
...
>>> dataset = Dataset.from_generator(my_gen)
```
This approach supports loading data larger than available memory.
You can also define a sharded dataset by passing lists to `gen_kwargs`:
```py
>>> from datasets import IterableDataset
>>> def gen(shards):
... for shard in shards:
... with open(shard) as f:
... for line in f:
... yield {"line": line}
...
>>> shards = [f"data{i}.txt" for i in range(32)]
>>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
>>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer
>>> from torch.utils.data import DataLoader
>>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards
```
### Pandas DataFrame
Load Pandas DataFrames with [`~Dataset.from_pandas`]:
```py
>>> from datasets import Dataset
>>> import pandas as pd
>>> df = pd.DataFrame({"a": [1, 2, 3]})
>>> dataset = Dataset.from_pandas(df)
```
<Tip>
For more details, check out the [how to load tabular datasets from Pandas DataFrames](tabular_load#pandas-dataframes) guide.
</Tip>
## Offline
Even if you don't have an internet connection, it is still possible to load a dataset. As long as you've downloaded a dataset from the Hub repository before, it should be cached. This means you can reload the dataset from the cache and use it offline.
If you know you won't have internet access, you can run π€ Datasets in full offline mode. This saves time because instead of waiting for the Dataset builder download to time out, π€ Datasets will look directly in the cache. Set the environment variable `HF_DATASETS_OFFLINE` to `1` to enable full offline mode.
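For example, you could set the variable before launching your script (the script name is a placeholder):
```bash
HF_DATASETS_OFFLINE=1 python my_training_script.py
```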
## Slice splits
You can also choose only to load specific slices of a split. There are two options for slicing a split: using strings or the [`ReadInstruction`] API. Strings are more compact and readable for simple cases, while [`ReadInstruction`] is easier to use with variable slicing parameters.
Concatenate a `train` and `test` split by:
```py
>>> train_test_ds = datasets.load_dataset("bookcorpus", split="train+test")
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> ri = datasets.ReadInstruction("train") + datasets.ReadInstruction("test")
>>> train_test_ds = datasets.load_dataset("bookcorpus", split=ri)
```
Select specific rows of the `train` split:
```py
>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split="train[10:20]")
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> train_10_20_ds = datasets.load_dataset("bookcorpu", split=datasets.ReadInstruction("train", from_=10, to=20, unit="abs"))
```
Or select a percentage of a split with:
```py
>>> train_10pct_ds = datasets.load_dataset("bookcorpus", split="train[:10%]")
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", to=10, unit="%"))
```
Select a combination of percentages from each split:
```py
>>> train_10_80pct_ds = datasets.load_dataset("bookcorpus", split="train[:10%]+train[-80%:]")
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> ri = (datasets.ReadInstruction("train", to=10, unit="%") + datasets.ReadInstruction("train", from_=-80, unit="%"))
>>> train_10_80pct_ds = datasets.load_dataset("bookcorpus", split=ri)
```
Finally, you can even create cross-validated splits. The example below creates 10-fold cross-validated splits. Each validation dataset is a 10% chunk, and the training dataset makes up the remaining complementary 90% chunk:
```py
>>> val_ds = datasets.load_dataset("bookcorpus", split=[f"train[{k}%:{k+10}%]" for k in range(0, 100, 10)])
>>> train_ds = datasets.load_dataset("bookcorpus", split=[f"train[:{k}%]+train[{k+10}%:]" for k in range(0, 100, 10)])
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> val_ds = datasets.load_dataset("bookcorpus", [datasets.ReadInstruction("train", from_=k, to=k+10, unit="%") for k in range(0, 100, 10)])
>>> train_ds = datasets.load_dataset("bookcorpus", [(datasets.ReadInstruction("train", to=k, unit="%") + datasets.ReadInstruction("train", from_=k+10, unit="%")) for k in range(0, 100, 10)])
```
### Percent slicing and rounding
The default behavior is to round the boundaries to the nearest integer for datasets where the requested slice boundaries do not divide evenly by 100. As shown below, some slices may contain more examples than others. For instance, if the following train split includes 999 records, then:
```py
# 19 records, from 500 (included) to 519 (excluded).
>>> train_50_52_ds = datasets.load_dataset("bookcorpus", split="train[50%:52%]")
# 20 records, from 519 (included) to 539 (excluded).
>>> train_52_54_ds = datasets.load_dataset("bookcorpus", split="train[52%:54%]")
```
If you want equal sized splits, use `pct1_dropremainder` rounding instead. This treats the specified percentage boundaries as multiples of 1%.
```py
# 18 records, from 450 (included) to 468 (excluded).
>>> train_50_52pct1_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", from_=50, to=52, unit="%", rounding="pct1_dropremainder"))
# 18 records, from 468 (included) to 486 (excluded).
>>> train_52_54pct1_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train",from_=52, to=54, unit="%", rounding="pct1_dropremainder"))
# Or equivalently:
>>> train_50_52pct1_ds = datasets.load_dataset("bookcorpus", split="train[50%:52%](pct1_dropremainder)")
>>> train_52_54pct1_ds = datasets.load_dataset("bookcorpus", split="train[52%:54%](pct1_dropremainder)")
```
<Tip warning={true}>
`pct1_dropremainder` rounding may truncate the last examples in a dataset if the number of examples in your dataset doesn't divide evenly by 100.
</Tip>
<a id='troubleshoot'></a>
## Troubleshooting
Sometimes, you may get unexpected results when you load a dataset. Two of the most common issues you may encounter are manually downloading a dataset and specifying features of a dataset.
### Manual download
Certain datasets require you to manually download the dataset files due to licensing incompatibility or if the files are hidden behind a login page. This causes [`load_dataset`] to throw an `AssertionError`. But π€ Datasets provides detailed instructions for downloading the missing files. After you've downloaded the files, use the `data_dir` argument to specify the path to the files you just downloaded.
For example, if you try to download a configuration from the [MATINF](https://huggingface.co/datasets/matinf) dataset:
```py
>>> dataset = load_dataset("matinf", "summarization")
Downloading and preparing dataset matinf/summarization (download: Unknown size, generated: 246.89 MiB, post-processed: Unknown size, total: 246.89 MiB) to /root/.cache/huggingface/datasets/matinf/summarization/1.0.0/82eee5e71c3ceaf20d909bca36ff237452b4e4ab195d3be7ee1c78b53e6f540e...
AssertionError: The dataset matinf with config summarization requires manual data.
Please follow the manual download instructions: To use MATINF you have to download it manually. Please fill this google form (https://forms.gle/nkH4LVE4iNQeDzsc9). You will receive a download link and a password once you complete the form. Please extract all files in one folder and load the dataset with: *datasets.load_dataset('matinf', data_dir='path/to/folder/folder_name')*.
Manual data can be loaded with `datasets.load_dataset(matinf, data_dir='<path/to/manual/data>')
```
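Once the files are downloaded and extracted, point `data_dir` at that folder to load the configuration (the path below is a placeholder):
```py
>>> dataset = load_dataset("matinf", "summarization", data_dir="path/to/folder/folder_name")
```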
If you've already downloaded a dataset from the *Hub with a loading script* to your computer, then you need to pass an absolute path to the `data_dir` or `data_files` parameter to load that dataset. Otherwise, if you pass a relative path, [`load_dataset`] will load the directory from the repository on the Hub instead of the local directory.
### Specify features
When you create a dataset from local files, the [`Features`] are automatically inferred by [Apache Arrow](https://arrow.apache.org/docs/). However, the dataset's features may not always align with your expectations, or you may want to define the features yourself. The following example shows how you can add custom labels with the [`ClassLabel`] feature.
Start by defining your own labels with the [`Features`] class:
```py
>>> from datasets import Features, Value, ClassLabel
>>> class_names = ["sadness", "joy", "love", "anger", "fear", "surprise"]
>>> emotion_features = Features({'text': Value('string'), 'label': ClassLabel(names=class_names)})
```
Next, specify the `features` parameter in [`load_dataset`] with the features you just created:
```py
>>> dataset = load_dataset('csv', data_files=file_dict, delimiter=';', column_names=['text', 'label'], features=emotion_features)
```
Now when you look at your dataset features, you can see it uses the custom labels you defined:
```py
>>> dataset['train'].features
{'text': Value(dtype='string', id=None),
'label': ClassLabel(num_classes=6, names=['sadness', 'joy', 'love', 'anger', 'fear', 'surprise'], names_file=None, id=None)}
```
## Metrics
<Tip warning={true}>
Metrics is deprecated in π€ Datasets. To learn more about how to use metrics, take a look at the library π€ [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
</Tip>
When the metric you want to use is not supported by π€ Datasets, you can write and use your own metric script. Load your metric by providing the path to your local metric loading script:
```py
>>> from datasets import load_metric
>>> metric = load_metric('PATH/TO/MY/METRIC/SCRIPT')
>>> # Example of typical usage
>>> for batch in dataset:
... inputs, references = batch
... predictions = model(inputs)
... metric.add_batch(predictions=predictions, references=references)
>>> score = metric.compute()
```
<Tip>
See the [Metrics](./how_to_metrics#custom-metric-loading-script) guide for more details on how to write your own metric loading script.
</Tip>
### Load configurations
It is possible for a metric to have different configurations. The configurations are stored in the `config_name` attribute of [`MetricInfo`]. When you load a metric, provide the configuration name as shown in the following:
```py
>>> from datasets import load_metric
>>> metric = load_metric('bleurt', name='bleurt-base-128')
>>> metric = load_metric('bleurt', name='bleurt-base-512')
```
### Distributed setup
When working in a distributed or parallel processing environment, loading and computing a metric can be tricky because these processes are executed in parallel on separate subsets of the data. π€ Datasets supports distributed usage with a few additional arguments when you load a metric.
For example, imagine you are training and evaluating on eight parallel processes. Here's how you would load a metric in this distributed setting:
1. Define the total number of processes with the `num_process` argument.
2. Set the process `rank` as an integer between zero and `num_process - 1`.
3. Load your metric with [`load_metric`] with these arguments:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc', num_process=num_process, process_id=rank)
```
<Tip>
Once you've loaded a metric for distributed usage, you can compute the metric as usual. Behind the scenes, [`Metric.compute`] gathers all the predictions and references from the nodes, and computes the final metric.
</Tip>
In some instances, you may be simultaneously running multiple independent distributed evaluations on the same server and files. To avoid any conflicts, it is important to provide an `experiment_id` to distinguish the separate evaluations:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc', num_process=num_process, process_id=process_id, experiment_id="My_experiment_10")
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/faiss_es.mdx | # Search index
[FAISS](https://github.com/facebookresearch/faiss) and [ElasticSearch](https://www.elastic.co/elasticsearch/) enable searching for examples in a dataset. This can be useful when you want to retrieve specific examples from a dataset that are relevant to your NLP task. For example, if you are working on an Open Domain Question Answering task, you may want to only return examples that are relevant to answering your question.
This guide will show you how to build an index for your dataset that will allow you to search it.
## FAISS
FAISS retrieves documents based on the similarity of their vector representations. In this example, you will generate the vector representations with the [DPR](https://huggingface.co/transformers/model_doc/dpr.html) model.
1. Download the DPR model from π€ Transformers:
```py
>>> from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
>>> import torch
>>> torch.set_grad_enabled(False)
>>> ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
>>> ctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
```
2. Load your dataset and compute the vector representations:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('crime_and_punish', split='train[:100]')
>>> ds_with_embeddings = ds.map(lambda example: {'embeddings': ctx_encoder(**ctx_tokenizer(example["line"], return_tensors="pt"))[0][0].numpy()})
```
3. Create the index with [`Dataset.add_faiss_index`]:
```py
>>> ds_with_embeddings.add_faiss_index(column='embeddings')
```
4. Now you can query your dataset with the `embeddings` index. Load the DPR Question Encoder, and search for a question with [`Dataset.get_nearest_examples`]:
```py
>>> from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer
>>> q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
>>> q_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
>>> question = "Is it serious ?"
>>> question_embedding = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].numpy()
>>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', question_embedding, k=10)
>>> retrieved_examples["line"][0]
'_that_ serious? It is not serious at all. Itβs simply a fantasy to amuse\r\n'
```
5. You can access the index with [`Dataset.get_index`] and use it for special operations, e.g. query it using `range_search`:
```py
>>> faiss_index = ds_with_embeddings.get_index('embeddings').faiss_index
>>> limits, distances, indices = faiss_index.range_search(x=question_embedding.reshape(1, -1), thresh=0.95)
```
6. When you are done querying, save the index on disk with [`Dataset.save_faiss_index`]:
```py
>>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
```
7. Reload it at a later time with [`Dataset.load_faiss_index`]:
```py
>>> ds = load_dataset('crime_and_punish', split='train[:100]')
>>> ds.load_faiss_index('embeddings', 'my_index.faiss')
```
## ElasticSearch
Unlike FAISS, ElasticSearch retrieves documents based on exact matches.
Start ElasticSearch on your machine, or see the [ElasticSearch installation guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html) if you don't already have it installed.
1. Load the dataset you want to index:
```py
>>> from datasets import load_dataset
>>> squad = load_dataset('squad', split='validation')
```
2. Build the index with [`Dataset.add_elasticsearch_index`]:
```py
>>> squad.add_elasticsearch_index("context", host="localhost", port="9200")
```
3. Then you can query the `context` index with [`Dataset.get_nearest_examples`]:
```py
>>> query = "machine"
>>> scores, retrieved_examples = squad.get_nearest_examples("context", query, k=10)
>>> retrieved_examples["title"][0]
'Computational_complexity_theory'
```
4. If you want to reuse the index, define the `es_index_name` parameter when you build the index:
```py
>>> from datasets import load_dataset
>>> squad = load_dataset('squad', split='validation')
>>> squad.add_elasticsearch_index("context", host="localhost", port="9200", es_index_name="hf_squad_val_context")
>>> squad.get_index("context").es_index_name
hf_squad_val_context
```
5. Reload it later with the index name when you call [`Dataset.load_elasticsearch_index`]:
```py
>>> from datasets import load_dataset
>>> squad = load_dataset('squad', split='validation')
>>> squad.load_elasticsearch_index("context", host="localhost", port="9200", es_index_name="hf_squad_val_context")
>>> query = "machine"
>>> scores, retrieved_examples = squad.get_nearest_examples("context", query, k=10)
```
For more advanced ElasticSearch usage, you can specify your own configuration with custom settings:
```py
>>> import elasticsearch as es
>>> import elasticsearch.helpers
>>> from elasticsearch import Elasticsearch
>>> es_client = Elasticsearch([{"host": "localhost", "port": "9200"}]) # default client
>>> es_config = {
... "settings": {
... "number_of_shards": 1,
... "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
... },
... "mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}},
... } # default config
>>> es_index_name = "hf_squad_context" # name of the index in ElasticSearch
>>> squad.add_elasticsearch_index("context", es_client=es_client, es_config=es_config, es_index_name=es_index_name)
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/use_with_pytorch.mdx | # Use with PyTorch
This document is a quick introduction to using `datasets` with PyTorch, with a particular focus on how to get
`torch.Tensor` objects out of our datasets, and how to use a PyTorch `DataLoader` and a Hugging Face `Dataset`
with the best performance.
## Dataset format
By default, datasets return regular python objects: integers, floats, strings, lists, etc.
To get PyTorch tensors instead, you can set the format of the dataset to `pytorch` using [`Dataset.with_format`]:
```py
>>> from datasets import Dataset
>>> data = [[1, 2],[3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([1, 2])}
>>> ds[:2]
{'data': tensor([[1, 2],
[3, 4]])}
```
<Tip>
A [`Dataset`] object is a wrapper of an Arrow table, which allows fast zero-copy reads from arrays in the dataset to PyTorch tensors.
</Tip>
To load the data as tensors on a GPU, specify the `device` argument:
```py
>>> import torch
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> ds = ds.with_format("torch", device=device)
>>> ds[0]
{'data': tensor([1, 2], device='cuda:0')}
```
## N-dimensional arrays
If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists.
In particular, a PyTorch formatted dataset outputs nested lists instead of a single tensor:
```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': [tensor([1, 2]), tensor([3, 4])]}
```
To get a single tensor, you must explicitly use the [`Array`] feature type and specify the shape of your tensors:
```py
>>> from datasets import Dataset, Features, Array2D
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')})
>>> ds = Dataset.from_dict({"data": data}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([[1, 2],
[3, 4]])}
>>> ds[:2]
{'data': tensor([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])}
```
## Other feature types
[`ClassLabel`] data are properly converted to tensors:
```py
>>> from datasets import Dataset, Features, ClassLabel
>>> labels = [0, 0, 1]
>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
>>> ds = Dataset.from_dict({"label": labels}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[:3]
{'label': tensor([0, 0, 1])}
```
String and binary objects are unchanged, since PyTorch only supports numbers.
The [`Image`] and [`Audio`] feature types are also supported.
<Tip>
To use the [`Image`] feature type, you'll need to install the `vision` extra as
`pip install datasets[vision]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio, Image
>>> images = ["path/to/image.png"] * 10
>>> features = Features({"image": Image()})
>>> ds = Dataset.from_dict({"image": images}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]["image"].shape
torch.Size([512, 512, 4])
>>> ds[0]
{'image': tensor([[[255, 215, 106, 255],
[255, 215, 106, 255],
...,
[255, 255, 255, 255],
[255, 255, 255, 255]]], dtype=torch.uint8)}
>>> ds[:2]["image"].shape
torch.Size([2, 512, 512, 4])
>>> ds[:2]
{'image': tensor([[[[255, 215, 106, 255],
[255, 215, 106, 255],
...,
[255, 255, 255, 255],
[255, 255, 255, 255]]]], dtype=torch.uint8)}
```
<Tip>
To use the [`Audio`] feature type, you'll need to install the `audio` extra as
`pip install datasets[audio]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio, Image
>>> audio = ["path/to/audio.wav"] * 10
>>> features = Features({"audio": Audio()})
>>> ds = Dataset.from_dict({"audio": audio}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]["audio"]["array"]
tensor([ 6.1035e-05, 1.5259e-05, 1.6785e-04, ..., -1.5259e-05,
-1.5259e-05, 1.5259e-05])
>>> ds[0]["audio"]["sampling_rate"]
tensor(44100)
```
## Data loading
Like `torch.utils.data.Dataset` objects, a [`Dataset`] can be passed directly to a PyTorch `DataLoader`:
```py
>>> import numpy as np
>>> from datasets import Dataset
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(16)
>>> label = np.random.randint(0, 2, size=16)
>>> ds = Dataset.from_dict({"data": data, "label": label}).with_format("torch")
>>> dataloader = DataLoader(ds, batch_size=4)
>>> for batch in dataloader:
... print(batch)
{'data': tensor([0.0047, 0.4979, 0.6726, 0.8105]), 'label': tensor([0, 1, 0, 1])}
{'data': tensor([0.4832, 0.2723, 0.4259, 0.2224]), 'label': tensor([0, 0, 0, 0])}
{'data': tensor([0.5837, 0.3444, 0.4658, 0.6417]), 'label': tensor([0, 1, 0, 0])}
{'data': tensor([0.7022, 0.1225, 0.7228, 0.8259]), 'label': tensor([1, 1, 1, 1])}
```
### Optimize data loading
There are several ways you can increase the speed at which your data is loaded, which can save you time, especially if you are working with large datasets.
PyTorch offers parallelized data loading, retrieving batches of indices instead of individual items, and streaming to iterate over the dataset without downloading it to disk.
#### Use multiple Workers
You can parallelize data loading with the `num_workers` argument of a PyTorch `DataLoader` and get a higher throughput.
Under the hood, the `DataLoader` starts `num_workers` processes.
Each process reloads the dataset passed to the `DataLoader` and is used to query examples.
Reloading the dataset inside a worker doesn't fill up your RAM, since it simply memory-maps the dataset again from your disk.
```py
>>> import numpy as np
>>> from datasets import Dataset, load_from_disk
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(10_000)
>>> Dataset.from_dict({"data": data}).save_to_disk("my_dataset")
>>> ds = load_from_disk("my_dataset").with_format("torch")
>>> dataloader = DataLoader(ds, batch_size=32, num_workers=4)
```
#### Use a BatchSampler (torch<=1.12.1)
For old versions of PyTorch, using a `BatchSampler` can speed up data loading.
Indeed, if you are using `torch<=1.12.1`, the PyTorch `DataLoader` loads batches of data from a dataset one item at a time, like this:
```py
batch = [dataset[idx] for idx in range(start, end)]
```
Unfortunately, this does numerous read operations on the dataset.
It is more efficient to query batches of examples using a list:
```py
batch = dataset[start:end]
# or
batch = dataset[list_of_indices]
```
For the PyTorch `DataLoader` to query batches using a list, you can use a `BatchSampler`:
```py
>>> from torch.utils.data.sampler import BatchSampler, RandomSampler
>>> batch_sampler = BatchSampler(RandomSampler(ds), batch_size=32, drop_last=False)
>>> dataloader = DataLoader(ds, batch_sampler=batch_sampler)
```
Moreover, this is particularly useful if you used [`set_transform`] to apply a transform on-the-fly when examples are accessed.
You must use a `BatchSampler` if you want the transform to be given full batches instead of `batch_size` individual elements, one at a time.
Recent versions of PyTorch use a list of indices, so a `BatchSampler` is not needed to get the best speed even if you used [`set_transform`].
### Stream data
Stream a dataset by loading it as an [`IterableDataset`]. This allows you to progressively iterate over a remote dataset without downloading it to disk, or over local data files.
Learn more about which type of dataset is best for your use case in the [choosing between a regular dataset or an iterable dataset](./about_mapstyle_vs_iterable) guide.
An iterable dataset from `datasets` inherits from `torch.utils.data.IterableDataset` so you can pass it to a `torch.utils.data.DataLoader`:
```py
>>> import numpy as np
>>> from datasets import Dataset, load_dataset
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(10_000)
>>> Dataset.from_dict({"data": data}).push_to_hub("<username>/my_dataset") # Upload to the Hugging Face Hub
>>> my_iterable_dataset = load_dataset("<username>/my_dataset", streaming=True, split="train")
>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32)
```
If the dataset is split in several shards (i.e. if the dataset consists of multiple data files), then you can stream in parallel using `num_workers`:
```py
>>> my_iterable_dataset = load_dataset("c4", "en", streaming=True, split="train")
>>> my_iterable_dataset.n_shards
1024
>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32, num_workers=4)
```
In this case each worker is given a subset of the list of shards to stream from.
### Distributed
To split your dataset across your training nodes, you can use [`datasets.distributed.split_dataset_by_node`]:
```python
import os
from datasets.distributed import split_dataset_by_node
ds = split_dataset_by_node(ds, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
```
This works for both map-style datasets and iterable datasets.
The dataset is split for the node at rank `rank` in a pool of nodes of size `world_size`.
For map-style datasets:
Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
For iterable datasets:
If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
then the shards are evenly assigned across the nodes, which is the most optimized.
Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
This can also be combined with a `torch.utils.data.DataLoader` if you want each node to use multiple workers to load the data.
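For example, after splitting the dataset by node, each node can read its own subset with several workers. A minimal sketch:
```python
from torch.utils.data import DataLoader

# each node iterates only over the shards/examples assigned to it
dataloader = DataLoader(ds.with_format("torch"), batch_size=32, num_workers=4)
```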
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/object_detection.mdx | # Object detection
Object detection models identify something in an image, and object detection datasets are used for applications such as autonomous driving and detecting natural hazards like wildfire. This guide will show you how to apply transformations to an object detection dataset following the [tutorial](https://albumentations.ai/docs/examples/example_bboxes/) from [Albumentations](https://albumentations.ai/docs/).
To run these examples, make sure you have up-to-date versions of `albumentations` and `cv2` installed:
```
pip install -U albumentations opencv-python
```
In this example, you'll use the [`cppe-5`](https://huggingface.co/datasets/cppe-5) dataset for identifying medical personal protective equipment (PPE) in the context of the COVID-19 pandemic.
Load the dataset and take a look at an example:
```py
from datasets import load_dataset
>>> ds = load_dataset("cppe-5")
>>> example = ds['train'][0]
>>> example
{'height': 663,
'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=943x663 at 0x7FC3DC756250>,
'image_id': 15,
'objects': {'area': [3796, 1596, 152768, 81002],
'bbox': [[302.0, 109.0, 73.0, 52.0],
[810.0, 100.0, 57.0, 28.0],
[160.0, 31.0, 248.0, 616.0],
[741.0, 68.0, 202.0, 401.0]],
'category': [4, 4, 0, 0],
'id': [114, 115, 116, 117]},
'width': 943}
```
The dataset has the following fields:
- `image`: PIL.Image.Image object containing the image.
- `image_id`: The image ID.
- `height`: The image height.
- `width`: The image width.
- `objects`: A dictionary containing bounding box metadata for the objects in the image:
- `id`: The annotation id.
- `area`: The area of the bounding box.
- `bbox`: The object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format), as shown in the example right after this list.
- `category`: The object's category, with possible values including `Coverall (0)`, `Face_Shield (1)`, `Gloves (2)`, `Goggles (3)` and `Mask (4)`.
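For reference, a box in the `coco` format is `[x_min, y_min, width, height]` in absolute pixel coordinates, as in the first box of the example above:
```py
>>> example['objects']['bbox'][0]  # [x_min, y_min, width, height]
[302.0, 109.0, 73.0, 52.0]
```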
You can visualize the `bboxes` on the image using `torchvision` utilities. To do that, you will need to reference the [`~datasets.ClassLabel`] feature associated with the category IDs so you can look up the string labels:
```py
>>> import torch
>>> from torchvision.ops import box_convert
>>> from torchvision.utils import draw_bounding_boxes
>>> from torchvision.transforms.functional import pil_to_tensor, to_pil_image
>>> categories = ds['train'].features['objects'].feature['category']
>>> boxes_xywh = torch.tensor(example['objects']['bbox'])
>>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy')
>>> labels = [categories.int2str(x) for x in example['objects']['category']]
>>> to_pil_image(
... draw_bounding_boxes(
... pil_to_tensor(example['image']),
... boxes_xyxy,
... colors="red",
... labels=labels,
... )
... )
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/nateraw/documentation-images/resolve/main/visualize_detection_example.png">
</div>
With `albumentations`, you can apply transforms that will affect the image while also updating the `bboxes` accordingly. In this case, the image is resized to (480, 480), flipped horizontally, and brightened.
`albumentations` expects the image to be in BGR format, not RGB, so you'll have to convert the image before applying the transform.
```py
>>> import albumentations
>>> import numpy as np
>>> transform = albumentations.Compose([
... albumentations.Resize(480, 480),
... albumentations.HorizontalFlip(p=1.0),
... albumentations.RandomBrightnessContrast(p=1.0),
... ], bbox_params=albumentations.BboxParams(format='coco', label_fields=['category']))
>>> # RGB PIL Image -> BGR Numpy array
>>> image = np.flip(np.array(example['image']), -1)
>>> out = transform(
... image=image,
... bboxes=example['objects']['bbox'],
... category=example['objects']['category'],
... )
```
Now when you visualize the result, the image should be flipped, but the `bboxes` should still be in the right places.
```py
>>> image = torch.tensor(out['image']).flip(-1).permute(2, 0, 1)
>>> boxes_xywh = torch.stack([torch.tensor(x) for x in out['bboxes']])
>>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy')
>>> labels = [categories.int2str(x) for x in out['category']]
>>> to_pil_image(
... draw_bounding_boxes(
... image,
... boxes_xyxy,
... colors='red',
... labels=labels
... )
... )
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/nateraw/documentation-images/resolve/main/visualize_detection_example_transformed.png">
</div>
Create a function to apply the transform to a batch of examples:
```py
>>> def transforms(examples):
... images, bboxes, categories = [], [], []
... for image, objects in zip(examples['image'], examples['objects']):
... image = np.array(image.convert("RGB"))[:, :, ::-1]
... out = transform(
... image=image,
... bboxes=objects['bbox'],
... category=objects['category']
... )
... images.append(torch.tensor(out['image']).flip(-1).permute(2, 0, 1))
... bboxes.append(torch.tensor(out['bboxes']))
... categories.append(out['category'])
... return {'image': images, 'bbox': bboxes, 'category': categories}
```
Use the [`~Dataset.set_transform`] function to apply the transform on-the-fly, which consumes less disk space. Because the data augmentation is random, accessing the same example twice may return a different image, which is especially useful when training a model for several epochs.
```py
>>> ds['train'].set_transform(transforms)
```
You can verify the transform works by visualizing the 10th example:
```py
>>> example = ds['train'][10]
>>> to_pil_image(
... draw_bounding_boxes(
... example['image'],
... box_convert(example['bbox'], 'xywh', 'xyxy'),
... colors='red',
... labels=[categories.int2str(x) for x in example['category']]
... )
... )
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/nateraw/documentation-images/resolve/main/visualize_detection_example_transformed_2.png">
</div>
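When you move on to training, keep in mind that each image can contain a different number of bounding boxes, so the default batch collation won't work. Here is a minimal sketch of a custom `collate_fn` for a `torch.utils.data.DataLoader`; the key names simply mirror the output of the `transforms` function above, and the batch size is arbitrary:
```py
>>> from torch.utils.data import DataLoader
>>> def collate_fn(batch):
...     # all images were resized to (480, 480), so they can be stacked into one tensor
...     pixel_values = torch.stack([item["image"] for item in batch])
...     # boxes and categories vary in length per image, so keep them as lists
...     bboxes = [item["bbox"] for item in batch]
...     categories = [item["category"] for item in batch]
...     return {"pixel_values": pixel_values, "bboxes": bboxes, "categories": categories}
>>> dataloader = DataLoader(ds["train"], batch_size=4, collate_fn=collate_fn)
```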
<Tip>
Now that you know how to process a dataset for object detection, learn
[how to train an object detection model](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/YOLOS/Fine_tuning_YOLOS_for_object_detection_on_custom_dataset_(balloon).ipynb)
and use it for inference.
</Tip> | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/about_metrics.mdx | # All about metrics
<Tip warning={true}>
Metrics is deprecated in π€ Datasets. To learn more about how to use metrics, take a look at the library π€ [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
</Tip>
π€ Datasets provides access to a wide range of NLP metrics. You can load metrics associated with benchmark datasets like GLUE or SQuAD, and complex metrics like BLEURT or BERTScore, with a single command: [`load_metric`]. Once you've loaded a metric, easily compute and evaluate a model's performance.
## ELI5: `load_metric`
Loading a dataset and loading a metric share many similarities. This was an intentional design choice because we wanted to create a simple and unified experience. When you call [`load_metric`], the metric loading script is downloaded and imported from GitHub (if it hasn't already been downloaded before). It contains information about the metric such as its citation, homepage, and description.
The metric loading script will instantiate and return a [`Metric`] object. This stores the predictions and references, which you need to compute the metric values. The [`Metric`] object is stored as an Apache Arrow table. As a result, the predictions and references are stored directly on disk with memory-mapping. This enables π€ Datasets to do a lazy computation of the metric, and makes it easier to gather all the predictions in a distributed setting.
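For example, a minimal usage sketch with the GLUE MRPC metric (the predictions and references below are dummy values):
```py
>>> from datasets import load_metric
>>> metric = load_metric("glue", "mrpc")
>>> metric.add_batch(predictions=[0, 1, 0], references=[0, 1, 1])
>>> score = metric.compute()
```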
## Distributed evaluation
Computing metrics in a distributed environment can be tricky. Metric evaluation is executed in separate Python processes, or nodes, on different subsets of a dataset. Typically, when a metric score is additive (`f(AuB) = f(A) + f(B)`), you can use distributed reduce operations to gather the scores for each subset of the dataset. But when a metric is non-additive (`f(AuB) ≠ f(A) + f(B)`), it's not that simple. For example, you can't take the sum of the [F1](https://huggingface.co/metrics/f1) scores of each data subset as your **final metric**.
A common way to overcome this issue is to fall back on single-process evaluation, where the metrics are evaluated on a single GPU, which becomes inefficient.
π€ Datasets solves this issue by only computing the final metric on the first node. The predictions and references are computed and provided to the metric separately for each node. These are temporarily stored in an Apache Arrow table, avoiding cluttering the GPU or CPU memory. When you are ready to [`Metric.compute`] the final metric, the first node is able to access the predictions and references stored on all the other nodes. Once it has gathered all the predictions and references, [`Metric.compute`] will perform the final metric evaluation.
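In practice, each process loads the metric with its rank and the total number of processes, and only the first node computes the final score. A sketch, assuming `rank`, `world_size`, `model_predictions`, and `gold_references` are provided by your training setup:
```py
>>> from datasets import load_metric
>>> metric = load_metric("glue", "mrpc", num_process=world_size, process_id=rank)
>>> metric.add_batch(predictions=model_predictions, references=gold_references)
>>> score = metric.compute()  # the final score is gathered and computed on the first node
```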
This solution allows π€ Datasets to perform distributed predictions, which is important for evaluation speed in distributed settings. At the same time, you can also use complex non-additive metrics without wasting valuable GPU or CPU memory. | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/use_with_spark.mdx | # Use with Spark
This document is a quick introduction to using π€ Datasets with Spark, with a particular focus on how to load a Spark DataFrame into a [`Dataset`] object.
From there, you have fast access to any element and you can use it as a data loader to train models.
## Load from Spark
A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to PyTorch, TensorFlow and JAX tensors.
The Arrow table is memory-mapped from disk, so you can load datasets bigger than your available RAM.
You can get a [`Dataset`] from a Spark DataFrame using [`Dataset.from_spark`]:
```py
>>> from datasets import Dataset
>>> df = spark.createDataFrame(
... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
...     schema=["id", "name"],
... )
>>> ds = Dataset.from_spark(df)
```
The Spark workers write the dataset on disk in a cache directory as Arrow files, and the [`Dataset`] is loaded from there.
Alternatively, you can skip materialization by using [`IterableDataset.from_spark`], which returns an [`IterableDataset`]:
```py
>>> from datasets import IterableDataset
>>> df = spark.createDataFrame(
... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
...     schema=["id", "name"],
... )
>>> ds = IterableDataset.from_spark(df)
>>> print(next(iter(ds)))
{"id": 1, "name": "Elia"}
```
### Caching
When using [`Dataset.from_spark`], the resulting [`Dataset`] is cached; if you call [`Dataset.from_spark`] multiple
times on the same DataFrame it won't re-run the Spark job that writes the dataset as Arrow files on disk.
You can set the cache location by passing `cache_dir=` to [`Dataset.from_spark`].
Make sure to use a disk that is available to both your workers and your current machine (the driver).
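For example, on a cluster you might point the cache to a shared filesystem (the path below is just a placeholder):
```py
>>> ds = Dataset.from_spark(df, cache_dir="/mnt/shared/cache/huggingface/datasets")
```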
<Tip warning={true}>
In a different session, a Spark DataFrame doesn't have the same [semantic hash](https://spark.apache.org/docs/3.2.0/api/python/reference/api/pyspark.sql.DataFrame.semanticHash.html), and it will rerun a Spark job and store it in a new cache.
</Tip>
### Feature types
If your dataset is made of images, audio data or N-dimensional arrays, you can specify the `features=` argument in
[`Dataset.from_spark`] (or [`IterableDataset.from_spark`]):
```py
>>> from datasets import Dataset, Features, Image, Value
>>> data = [(0, open("image.png", "rb").read())]
>>> df = spark.createDataFrame(data, "idx: int, image: binary")
>>> # Also works if you have arrays
>>> # data = [(0, np.zeros(shape=(32, 32, 3), dtype=np.int32).tolist())]
>>> # df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
>>> features = Features({"idx": Value("int64"), "image": Image()})
>>> dataset = Dataset.from_spark(df, features=features)
>>> dataset[0]
{'idx': 0, 'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=32x32>}
```
You can check the [`Features`] documentation to know about all the feature types available.
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/_redirects.yml | # This first_section was backported from nginx
loading_datasets: loading
share_dataset: share
quicktour: quickstart
dataset_streaming: stream
torch_tensorflow: use_dataset
splits: loading#slice-splits
processing: process
faiss_and_ea: faiss_es
features: about_dataset_features
using_metrics: how_to_metrics
exploring: access
# end of first_section
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/create_dataset.mdx | # Create a dataset
Sometimes, you may need to create a dataset if you're working with your own data. Creating a dataset with π€ Datasets confers all the advantages of the library to your dataset: fast loading and processing, [stream enormous datasets](stream), [memory-mapping](https://huggingface.co/course/chapter5/4?fw=pt#the-magic-of-memory-mapping), and more. You can easily and rapidly create a dataset with π€ Datasets low-code approaches, reducing the time it takes to start training a model. In many cases, it is as easy as [dragging and dropping](upload_dataset#upload-with-the-hub-ui) your data files into a dataset repository on the Hub.
In this tutorial, you'll learn how to use π€ Datasets low-code methods for creating all types of datasets:
* Folder-based builders for quickly creating an image or audio dataset
* `from_` methods for creating datasets from local files
## Folder-based builders
There are two folder-based builders, [`ImageFolder`] and [`AudioFolder`]. These are low-code methods for quickly creating an image or speech and audio dataset with several thousand examples. They are great for rapidly prototyping computer vision and speech models before scaling to a larger dataset. Folder-based builders take your data and automatically generate the dataset's features, splits, and labels. Under the hood:
* [`ImageFolder`] uses the [`~datasets.Image`] feature to decode an image file. Many image extension formats are supported, such as jpg and png, among others. You can check the complete [list](https://github.com/huggingface/datasets/blob/b5672a956d5de864e6f5550e493527d962d6ae55/src/datasets/packaged_modules/imagefolder/imagefolder.py#L39) of supported image extensions.
* [`AudioFolder`] uses the [`~datasets.Audio`] feature to decode an audio file. Audio extensions such as wav and mp3 are supported, and you can check the complete [list](https://github.com/huggingface/datasets/blob/b5672a956d5de864e6f5550e493527d962d6ae55/src/datasets/packaged_modules/audiofolder/audiofolder.py#L39) of supported audio extensions.
The dataset splits are generated from the repository structure, and the label names are automatically inferred from the directory name.
For example, if your image dataset (it is the same for an audio dataset) is stored like this:
```
pokemon/train/grass/bulbasaur.png
pokemon/train/fire/charmander.png
pokemon/train/water/squirtle.png
pokemon/test/grass/ivysaur.png
pokemon/test/fire/charmeleon.png
pokemon/test/water/wartortle.png
```
Then this is how the folder-based builder generates an example:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/folder-based-builder.png"/>
</div>
Create the image dataset by specifying `imagefolder` in [`load_dataset`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/pokemon")
```
An audio dataset is created in the same way, except you specify `audiofolder` in [`load_dataset`] instead:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder")
```
Any additional information about your dataset, such as text captions or transcriptions, can be included with a `metadata.csv` file in the folder containing your dataset. The metadata file needs to have a `file_name` column that links the image or audio file to its corresponding metadata:
```
file_name, text
bulbasaur.png, There is a plant seed on its back right from the day this Pokémon is born.
charmander.png, It has a preference for hot things.
squirtle.png, When it retracts its long neck into its shell, it squirts out water with vigorous force.
```
To learn more about each of these folder-based builders, check out the <a href="https://huggingface.co/docs/datasets/image_dataset#imagefolder"><span class="underline decoration-yellow-400 decoration-2 font-semibold">ImageFolder</span></a> and <a href="https://huggingface.co/docs/datasets/audio_dataset#audiofolder"><span class="underline decoration-pink-400 decoration-2 font-semibold">AudioFolder</span></a> guides.
## From local files
You can also create a dataset from local files by specifying the path to the data files. There are two ways you can create a dataset using the `from_` methods:
* The [`~Dataset.from_generator`] method is the most memory-efficient way to create a dataset from a [generator](https://wiki.python.org/moin/Generators) due to a generator's iterative behavior. This is especially useful when you're working with a really large dataset that may not fit in memory, since the dataset is generated on disk progressively and then memory-mapped.
```py
>>> from datasets import Dataset
>>> def gen():
... yield {"pokemon": "bulbasaur", "type": "grass"}
... yield {"pokemon": "squirtle", "type": "water"}
>>> ds = Dataset.from_generator(gen)
>>> ds[0]
{"pokemon": "bulbasaur", "type": "grass"}
```
A generator-based [`IterableDataset`] needs to be iterated over with a `for` loop, for example:
```py
>>> from datasets import IterableDataset
>>> ds = IterableDataset.from_generator(gen)
>>> for example in ds:
... print(example)
{"pokemon": "bulbasaur", "type": "grass"}
{"pokemon": "squirtle", "type": "water"}
```
* The [`~Dataset.from_dict`] method is a straightforward way to create a dataset from a dictionary:
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_dict({"pokemon": ["bulbasaur", "squirtle"], "type": ["grass", "water"]})
>>> ds[0]
{"pokemon": "bulbasaur", "type": "grass"}
```
To create an image or audio dataset, chain the [`~Dataset.cast_column`] method with [`~Dataset.from_dict`] and specify the column and feature type. For example, to create an audio dataset:
```py
>>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", ..., "path/to/audio_n"]}).cast_column("audio", Audio())
```
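Similarly, here is a sketch of creating an image dataset with the [`~datasets.Image`] feature (the paths are placeholders):
```py
>>> from datasets import Dataset, Image
>>> image_dataset = Dataset.from_dict({"image": ["path/to/image_1", ..., "path/to/image_n"]}).cast_column("image", Image())
```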
## Next steps
We didn't mention this in the tutorial, but you can also create a dataset with a loading script. A loading script is a more manual and code-intensive method for creating a dataset, but it also gives you the most flexibility and control over how a dataset is generated. It lets you configure additional options such as creating multiple configurations within a dataset, or enabling your dataset to be streamed.
To learn more about how to write loading scripts, take a look at the <a href="https://huggingface.co/docs/datasets/main/en/image_dataset#loading-script"><span class="underline decoration-yellow-400 decoration-2 font-semibold">image loading script</span></a>, <a href="https://huggingface.co/docs/datasets/main/en/audio_dataset"><span class="underline decoration-pink-400 decoration-2 font-semibold">audio loading script</span></a>, and <a href="https://huggingface.co/docs/datasets/main/en/dataset_script"><span class="underline decoration-green-400 decoration-2 font-semibold">text loading script</span></a> guides.
Now that you know how to create a dataset, consider sharing it on the Hub so the community can also benefit from your work! Go on to the next section to learn how to share your dataset. | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/depth_estimation.mdx | # Depth estimation
Depth estimation datasets are used to train a model to approximate the relative distance of every pixel in an
image from the camera, also known as depth. The applications enabled by these datasets primarily lie in areas like visual machine
perception and perception in robotics. Example applications include mapping streets for self-driving cars. This guide will show you how to apply transformations
to a depth estimation dataset.
Before you start, make sure you have an up-to-date version of `albumentations` installed:
```bash
pip install -U albumentations
```
[Albumentations](https://albumentations.ai/) is a Python library for performing data augmentation
for computer vision. It supports various computer vision tasks such as image classification, object
detection, segmentation, and keypoint estimation.
This guide uses the [NYU Depth V2](https://huggingface.co/datasets/sayakpaul/nyu_depth_v2) dataset which is
comprised of video sequences from various indoor scenes, recorded by RGB and depth cameras. The dataset consists of scenes from 3 cities and provides images along with
their depth maps as labels.
Load the `train` split of the dataset and take a look at an example:
```py
>>> from datasets import load_dataset
>>> train_dataset = load_dataset("sayakpaul/nyu_depth_v2", split="train")
>>> index = 17
>>> example = train_dataset[index]
>>> example
{'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=640x480>,
'depth_map': <PIL.TiffImagePlugin.TiffImageFile image mode=F size=640x480>}
```
The dataset has two fields:
* `image`: a PIL PNG image object with `uint8` data type.
* `depth_map`: a PIL Tiff image object with `float32` data type which is the depth map of the image.
It is worth mentioning that the JPEG/PNG formats can only store `uint8` or `uint16` data. As the depth map is `float32` data, it can't be stored using PNG/JPEG. However, we can save the depth map using the TIFF format, which supports a wider range of data types, including `float32`.
Next, check out an image with:
```py
>>> example["image"]
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/depth_est_sample.png">
</div>
Before we look at the depth map, we first need to convert it to a displayable mode with `.convert('RGB')`, since PIL can't display `float32` images directly. Now take a look at its corresponding depth map:
```py
>>> example["depth_map"].convert("RGB")
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/depth_est_target.png">
</div>
It's all black! You'll need to add some color to the depth map to visualize it properly. To do that, we can either apply color automatically during display with `plt.imshow()` or create a colored depth map with `plt.cm` and then display it. In this example, we use the latter approach because it lets us save/write the colored depth map later. (The utility below is taken from the [FastDepth repository](https://github.com/dwofk/fast-depth/blob/master/utils.py).)
```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> cmap = plt.cm.viridis
>>> def colored_depthmap(depth, d_min=None, d_max=None):
... if d_min is None:
... d_min = np.min(depth)
... if d_max is None:
... d_max = np.max(depth)
... depth_relative = (depth - d_min) / (d_max - d_min)
... return 255 * cmap(depth_relative)[:,:,:3]
>>> def show_depthmap(depth_map):
... if not isinstance(depth_map, np.ndarray):
... depth_map = np.array(depth_map)
... if depth_map.ndim == 3:
... depth_map = depth_map.squeeze()
... d_min = np.min(depth_map)
... d_max = np.max(depth_map)
... depth_map = colored_depthmap(depth_map, d_min, d_max)
... plt.imshow(depth_map.astype("uint8"))
... plt.axis("off")
... plt.show()
>>> show_depthmap(example["depth_map"])
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/depth_est_target_viz.png">
</div>
You can also visualize several different images and their corresponding depth maps.
```py
>>> def merge_into_row(input_image, depth_target):
... if not isinstance(input_image, np.ndarray):
... input_image = np.array(input_image)
...
... d_min = np.min(depth_target)
... d_max = np.max(depth_target)
... depth_target_col = colored_depthmap(depth_target, d_min, d_max)
... img_merge = np.hstack([input_image, depth_target_col])
...
... return img_merge
>>> random_indices = np.random.choice(len(train_dataset), 9).tolist()
>>> plt.figure(figsize=(15, 6))
>>> for i, idx in enumerate(random_indices):
... example = train_dataset[idx]
... ax = plt.subplot(3, 3, i + 1)
... image_viz = merge_into_row(
... example["image"], example["depth_map"]
... )
... plt.imshow(image_viz.astype("uint8"))
... plt.axis("off")
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/depth_est_collage.png">
</div>
Now apply some augmentations with `albumentations`. The augmentation transformations include:
* Random horizontal flipping
* Random cropping
* Random brightness and contrast
* Random gamma correction
* Random hue saturation
```py
>>> import albumentations as A
>>> crop_size = (448, 576)
>>> transforms = [
... A.HorizontalFlip(p=0.5),
... A.RandomCrop(crop_size[0], crop_size[1]),
... A.RandomBrightnessContrast(),
... A.RandomGamma(),
... A.HueSaturationValue()
... ]
```
Additionally, define a mapping to better reflect the target key name.
```py
>>> additional_targets = {"depth": "mask"}
>>> aug = A.Compose(transforms=transforms, additional_targets=additional_targets)
```
With `additional_targets` defined, you can pass the target depth maps to the `depth` argument of `aug` instead of `mask`. You'll notice this change
in the `apply_transforms()` function defined below.
Create a function to apply the transformation to the images as well as their depth maps:
```py
>>> def apply_transforms(examples):
... transformed_images, transformed_maps = [], []
... for image, depth_map in zip(examples["image"], examples["depth_map"]):
... image, depth_map = np.array(image), np.array(depth_map)
... transformed = aug(image=image, depth=depth_map)
... transformed_images.append(transformed["image"])
... transformed_maps.append(transformed["depth"])
...
... examples["pixel_values"] = transformed_images
... examples["labels"] = transformed_maps
... return examples
```
Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space:
```py
>>> train_dataset.set_transform(apply_transforms)
```
You can verify the transformation worked by indexing into the `pixel_values` and `labels` of an example image:
```py
>>> example = train_dataset[index]
>>> plt.imshow(example["pixel_values"])
>>> plt.axis("off")
>>> plt.show()
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/depth_est_sample_aug.png">
</div>
Visualize the same transformation on the image's corresponding depth map:
```py
>>> show_depthmap(example["labels"])
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/depth_est_target_aug.png">
</div>
You can also visualize multiple training samples reusing the previous `random_indices`:
```py
>>> plt.figure(figsize=(15, 6))
>>> for i, idx in enumerate(random_indices):
... ax = plt.subplot(3, 3, i + 1)
... example = train_dataset[idx]
... image_viz = merge_into_row(
... example["pixel_values"], example["labels"]
... )
... plt.imshow(image_viz.astype("uint8"))
... plt.axis("off")
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/depth_est_aug_collage.png">
</div> | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/stream.mdx | # Stream
Dataset streaming lets you work with a dataset without downloading it.
The data is streamed as you iterate over the dataset.
This is especially helpful when:
- You don't want to wait for an extremely large dataset to download.
- The dataset size exceeds the amount of available disk space on your computer.
- You want to quickly explore just a few samples of a dataset.
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/streaming.gif"/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/streaming-dark.gif"/>
</div>
For example, the English split of the [oscar-corpus/OSCAR-2201](https://huggingface.co/datasets/oscar-corpus/OSCAR-2201) dataset is 1.2 terabytes, but you can use it instantly with streaming. Stream a dataset by setting `streaming=True` in [`load_dataset`] as shown below:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar-corpus/OSCAR-2201', 'en', split='train', streaming=True)
>>> print(next(iter(dataset)))
{'id': 0, 'text': 'Founded in 2015, Golden Bees is a leading programmatic recruitment platform dedicated to employers, HR agencies and job boards. The company has developed unique HR-custom technologies and predictive algorithms to identify and attract the best candidates for a job opportunity.', ...
```
Dataset streaming also lets you work with a dataset made of local files without doing any conversion.
In this case, the data is streamed from the local files as you iterate over the dataset.
This is especially helpful when:
- You don't want to wait for an extremely large local dataset to be converted to Arrow.
- The converted files size would exceed the amount of available disk space on your computer.
- You want to quickly explore just a few samples of a dataset.
For example, you can stream a local dataset of hundreds of compressed JSONL files like [oscar-corpus/OSCAR-2201](https://huggingface.co/datasets/oscar-corpus/OSCAR-2201) to use it instantly:
```py
>>> from datasets import load_dataset
>>> data_files = {'train': 'path/to/OSCAR-2201/compressed/en_meta/*.jsonl.gz'}
>>> dataset = load_dataset('json', data_files=data_files, split='train', streaming=True)
>>> print(next(iter(dataset)))
{'id': 0, 'text': 'Founded in 2015, Golden Bees is a leading programmatic recruitment platform dedicated to employers, HR agencies and job boards. The company has developed unique HR-custom technologies and predictive algorithms to identify and attract the best candidates for a job opportunity.', ...
```
Loading a dataset in streaming mode creates a new dataset type instance (instead of the classic [`Dataset`] object), known as an [`IterableDataset`].
This special type of dataset has its own set of processing methods shown below.
<Tip>
An [`IterableDataset`] is useful for iterative jobs like training a model.
You shouldn't use an [`IterableDataset`] for jobs that require random access to examples, because you would have to iterate over the dataset with a `for` loop. Getting the last example in an iterable dataset would require you to iterate over all the previous examples.
You can find more details in the [Dataset vs. IterableDataset guide](./about_mapstyle_vs_iterable).
</Tip>
## Shuffle
Like a regular [`Dataset`] object, you can also shuffle an [`IterableDataset`] with [`IterableDataset.shuffle`].
The `buffer_size` argument controls the size of the buffer to randomly sample examples from. Let's say your dataset has one million examples, and you set the `buffer_size` to ten thousand. [`IterableDataset.shuffle`] will randomly select examples from the first ten thousand examples in the buffer. Selected examples in the buffer are replaced with new examples. By default, the buffer size is 1,000.
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> shuffled_dataset = dataset.shuffle(seed=42, buffer_size=10_000)
```
<Tip>
[`IterableDataset.shuffle`] will also shuffle the order of the shards if the dataset is sharded into multiple files.
</Tip>
## Reshuffle
Sometimes you may want to reshuffle the dataset after each epoch. This will require you to set a different seed for each epoch. Use [`IterableDataset.set_epoch`] in between epochs to tell the dataset what epoch you're on.
Your seed effectively becomes: `initial seed + current epoch`.
```py
>>> for epoch in range(epochs):
... shuffled_dataset.set_epoch(epoch)
... for example in shuffled_dataset:
... ...
```
## Split dataset
You can split your dataset one of two ways:
- [`IterableDataset.take`] returns the first `n` examples in a dataset:
```py
>>> dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> dataset_head = dataset.take(2)
>>> list(dataset_head)
[{'id': 0, 'text': 'Mtendere Village was...'}, {'id': 1, 'text': 'Lily James cannot fight the music...'}]
```
- [`IterableDataset.skip`] omits the first `n` examples in a dataset and returns the remaining examples:
```py
>>> train_dataset = shuffled_dataset.skip(1000)
```
<Tip warning={true}>
`take` and `skip` prevent future calls to `shuffle` because they lock in the order of the shards. You should `shuffle` your dataset before splitting it.
</Tip>
<a id='interleave_datasets'></a>
## Interleave
[`interleave_datasets`] can combine an [`IterableDataset`] with other datasets. The combined dataset returns alternating examples from each of the original datasets.
```py
>>> from datasets import interleave_datasets
>>> en_dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> fr_dataset = load_dataset('oscar', "unshuffled_deduplicated_fr", split='train', streaming=True)
>>> multilingual_dataset = interleave_datasets([en_dataset, fr_dataset])
>>> list(multilingual_dataset.take(2))
[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': "Média de débat d'idées, de culture et de littérature..."}]
```
Define sampling probabilities from each of the original datasets for more control over how each of them are sampled and combined. Set the `probabilities` argument with your desired sampling probabilities:
```py
>>> multilingual_dataset_with_oversampling = interleave_datasets([en_dataset, fr_dataset], probabilities=[0.8, 0.2], seed=42)
>>> list(multilingual_dataset_with_oversampling.take(2))
[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': 'Lily James cannot fight the music...'}]
```
Around 80% of the final dataset is made of the `en_dataset`, and 20% of the `fr_dataset`.
You can also specify the `stopping_strategy`. The default strategy, `first_exhausted`, is a subsampling strategy, i.e. the dataset construction stops as soon as one of the datasets runs out of samples.
You can specify `stopping_strategy=all_exhausted` to execute an oversampling strategy. In this case, the dataset construction is stopped as soon as every sample in every dataset has been added at least once. In practice, it means that if a dataset is exhausted, it returns to the beginning of that dataset until the stop criterion has been reached.
Note that if no sampling probabilities are specified, the new dataset will have `max_length_datasets * nb_dataset` samples.
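For example, here is a sketch of oversampling with the datasets defined above:
```py
>>> multilingual_dataset_oversampled = interleave_datasets(
...     [en_dataset, fr_dataset],
...     probabilities=[0.8, 0.2],
...     seed=42,
...     stopping_strategy="all_exhausted",
... )
```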
## Rename, remove, and cast
The following methods allow you to modify the columns of a dataset. These methods are useful for renaming or removing columns and changing columns to a new set of features.
### Rename
Use [`IterableDataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place.
Provide [`IterableDataset.rename_column`] with the name of the original column, and the new column name:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('mc4', 'en', streaming=True, split='train')
>>> dataset = dataset.rename_column("text", "content")
```
### Remove
When you need to remove one or more columns, give [`IterableDataset.remove_columns`] the name of the column to remove. Remove more than one column by providing a list of column names:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('mc4', 'en', streaming=True, split='train')
>>> dataset = dataset.remove_columns('timestamp')
```
### Cast
[`IterableDataset.cast`] changes the feature type of one or more columns. This method takes your new `Features` as its argument. The following sample code shows how to change the feature types of `ClassLabel` and `Value`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('glue', 'mrpc', split='train', streaming=True)
>>> dataset.features
{'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None),
'idx': Value(dtype='int32', id=None)}
>>> from datasets import ClassLabel, Value
>>> new_features = dataset.features.copy()
>>> new_features["label"] = ClassLabel(names=['negative', 'positive'])
>>> new_features["idx"] = Value('int64')
>>> dataset = dataset.cast(new_features)
>>> dataset.features
{'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
'label': ClassLabel(num_classes=2, names=['negative', 'positive'], names_file=None, id=None),
'idx': Value(dtype='int64', id=None)}
```
<Tip>
Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value('int32')` to `Value('bool')` if the original column only contains ones and zeros.
</Tip>
Use [`IterableDataset.cast_column`] to change the feature type of just one column. Pass the column name and its new feature type as arguments:
```py
>>> dataset.features
{'audio': Audio(sampling_rate=44100, mono=True, id=None)}
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
>>> dataset.features
{'audio': Audio(sampling_rate=16000, mono=True, id=None)}
```
## Map
Similar to the [`Dataset.map`] function for a regular [`Dataset`], π€ Datasets features [`IterableDataset.map`] for processing an [`IterableDataset`].
[`IterableDataset.map`] applies processing on-the-fly when examples are streamed.
It allows you to apply a processing function to each example in a dataset, independently or in batches. This function can even create new rows and columns.
The following example demonstrates how to apply a simple processing function to an [`IterableDataset`]. The function needs to accept and output a `dict`:
```py
>>> def add_prefix(example):
... example['text'] = 'My text: ' + example['text']
... return example
```
Next, apply this function to the dataset with [`IterableDataset.map`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar', 'unshuffled_deduplicated_en', streaming=True, split='train')
>>> updated_dataset = dataset.map(add_prefix)
>>> list(updated_dataset.take(3))
[{'id': 0, 'text': 'My text: Mtendere Village was inspired by...'},
{'id': 1, 'text': 'My text: Lily James cannot fight the music...'},
{'id': 2, 'text': 'My text: "I\'d love to help kickstart...'}]
```
Let's take a look at another example, except this time, you will remove a column with [`IterableDataset.map`]. When you remove a column, it is only removed after the example has been provided to the mapped function. This allows the mapped function to use the content of the columns before they are removed.
Specify the column to remove with the `remove_columns` argument in [`IterableDataset.map`]:
```py
>>> updated_dataset = dataset.map(add_prefix, remove_columns=["id"])
>>> list(updated_dataset.take(3))
[{'text': 'My text: Mtendere Village was inspired by...'},
{'text': 'My text: Lily James cannot fight the music...'},
{'text': 'My text: "I\'d love to help kickstart...'}]
```
### Batch processing
[`IterableDataset.map`] also supports working with batches of examples. Operate on batches by setting `batched=True`. The default batch size is 1000, but you can adjust it with the `batch_size` argument. This opens the door to many interesting applications such as tokenization, splitting long sentences into shorter chunks, and data augmentation.
#### Tokenization
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> dataset = load_dataset("mc4", "en", streaming=True, split="train")
>>> tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
>>> def encode(examples):
... return tokenizer(examples['text'], truncation=True, padding='max_length')
>>> dataset = dataset.map(encode, batched=True, remove_columns=["text", "timestamp", "url"])
>>> next(iter(dataset))
{'input_ids': [101, 8466, 1018, 1010, 4029, 2475, 2062, 18558, 3100, 2061, ..., 1106, 3739, 102],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ..., 1, 1]}
```
<Tip>
See other examples of batch processing in the [batched map processing](./process#batch-processing) documentation. They work the same for iterable datasets.
</Tip>
### Filter
You can filter rows in the dataset based on a predicate function using [`IterableDataset.filter`]. It returns rows that match a specified condition:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar', 'unshuffled_deduplicated_en', streaming=True, split='train')
>>> start_with_ar = dataset.filter(lambda example: example['text'].startswith('Ar'))
>>> next(iter(start_with_ar))
{'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)?...'}
```
[`IterableDataset.filter`] can also filter by indices if you set `with_indices=True`:
```py
>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
>>> list(even_dataset.take(3))
[{'id': 0, 'text': 'Mtendere Village was inspired by the vision of Chief Napoleon Dzombe, ...'},
{'id': 2, 'text': '"I\'d love to help kickstart continued development! And 0 EUR/month...'},
{'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)? Normally, ...'}]
```
## Stream in a training loop
[`IterableDataset`] can be integrated into a training loop. First, shuffle the dataset:
<frameworkcontent>
<pt>
```py
>>> seed, buffer_size = 42, 10_000
>>> dataset = dataset.shuffle(seed, buffer_size=buffer_size)
```
Lastly, create a simple training loop and start training:
```py
>>> import torch
>>> from torch.utils.data import DataLoader
>>> from transformers import AutoModelForMaskedLM, DataCollatorForLanguageModeling
>>> from tqdm import tqdm
>>> dataset = dataset.with_format("torch")
>>> dataloader = DataLoader(dataset, collate_fn=DataCollatorForLanguageModeling(tokenizer))
>>> device = 'cuda' if torch.cuda.is_available() else 'cpu'
>>> model = AutoModelForMaskedLM.from_pretrained("distilbert-base-uncased")
>>> model.train().to(device)
>>> optimizer = torch.optim.AdamW(params=model.parameters(), lr=1e-5)
>>> for epoch in range(3):
... dataset.set_epoch(epoch)
... for i, batch in enumerate(tqdm(dataloader, total=5)):
... if i == 5:
... break
... batch = {k: v.to(device) for k, v in batch.items()}
... outputs = model(**batch)
... loss = outputs[0]
... loss.backward()
... optimizer.step()
... optimizer.zero_grad()
... if i % 10 == 0:
... print(f"loss: {loss}")
```
</pt>
</frameworkcontent>
<!-- TODO: Write the TF content! -->
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/how_to.md | # Overview
The how-to guides offer a more comprehensive overview of all the tools π€ Datasets offers and how to use them. This will help you tackle messier real-world datasets where you may need to manipulate the dataset structure or content to get it ready for training.
The guides assume you are familiar and comfortable with the π€ Datasets basics. We recommend newer users check out our [tutorials](tutorial) first.
<Tip>
Interested in learning more? Take a look at [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course!
</Tip>
The guides are organized into six sections:
- <span class="underline decoration-sky-400 decoration-2 font-semibold">General usage</span>: Functions for general dataset loading and processing. The functions shown in this section are applicable across all dataset modalities.
- <span class="underline decoration-pink-400 decoration-2 font-semibold">Audio</span>: How to load, process, and share audio datasets.
- <span class="underline decoration-yellow-400 decoration-2 font-semibold">Vision</span>: How to load, process, and share image datasets.
- <span class="underline decoration-green-400 decoration-2 font-semibold">Text</span>: How to load, process, and share text datasets.
- <span class="underline decoration-orange-400 decoration-2 font-semibold">Tabular</span>: How to load, process, and share tabular datasets.
- <span class="underline decoration-indigo-400 decoration-2 font-semibold">Dataset repository</span>: How to share and upload a dataset to the <a href="https://huggingface.co/datasets">Hub</a>.
If you have any questions about π€ Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/cache.mdx | # Cache management
When you download a dataset, the processing scripts and data are stored locally on your computer. The cache allows π€ Datasets to avoid re-downloading or processing the entire dataset every time you use it.
This guide will show you how to:
- Change the cache directory.
- Control how a dataset is loaded from the cache.
- Clean up cache files in the directory.
- Enable or disable caching.
## Cache directory
The default cache directory is `~/.cache/huggingface/datasets`. Change the cache location by setting the shell environment variable, `HF_DATASETS_CACHE` to another directory:
```
$ export HF_DATASETS_CACHE="/path/to/another/directory"
```
When you load a dataset, you also have the option to change where the data is cached. Change the `cache_dir` parameter to the path you want:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('LOADING_SCRIPT', cache_dir="PATH/TO/MY/CACHE/DIR")
```
Similarly, you can change where a metric is cached with the `cache_dir` parameter:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc', cache_dir="MY/CACHE/DIRECTORY")
```
## Download mode
After you download a dataset, control how it is loaded by [`load_dataset`] with the `download_mode` parameter. By default, π€ Datasets will reuse a dataset if it exists. But if you need the original dataset without any processing functions applied, re-download the files as shown below:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('squad', download_mode='force_redownload')
```
Refer to [`DownloadMode`] for a full list of download modes.
## Cache files
Clean up the cache files in the directory with [`Dataset.cleanup_cache_files`]:
```py
# Returns the number of removed cache files
>>> dataset.cleanup_cache_files()
2
```
## Enable or disable caching
If you're using a cached file locally, it will automatically reload the dataset with any previous transforms you applied to the dataset. Disable this behavior by setting the argument `load_from_cache_file=False` in [`Dataset.map`]:
```py
>>> updated_dataset = small_dataset.map(add_prefix, load_from_cache_file=False)
```
In the example above, π€ Datasets will execute the function `add_prefix` over the entire dataset again instead of loading the dataset from its previous state.
Disable caching on a global scale with [`disable_caching`]:
```py
>>> from datasets import disable_caching
>>> disable_caching()
```
When you disable caching, π€ Datasets will no longer reload cached files when applying transforms to datasets. Any transform you apply to your dataset will need to be reapplied.
<Tip>
If you want to reuse a dataset from scratch, try setting the `download_mode` parameter in [`load_dataset`] instead.
</Tip>
You can also avoid caching your metric entirely, and keep it in CPU memory instead:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc', keep_in_memory=True)
```
<Tip warning={true}>
Keeping the predictions in-memory is not possible in a distributed setting since the CPU memory spaces of the various processes are not shared.
</Tip>
<a id='load_dataset_enhancing_performance'></a>
## Improve performance
Disabling the cache and copying the dataset in-memory will speed up dataset operations. There are two options for copying the dataset in-memory:
1. Set `datasets.config.IN_MEMORY_MAX_SIZE` to a nonzero value (in bytes) that fits in your RAM.
2. Set the environment variable `HF_DATASETS_IN_MEMORY_MAX_SIZE` to a nonzero value. Note that the first method takes higher precedence; a sketch of the first option is shown below.
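A minimal sketch of the first option (the value below, roughly 20 GB, is arbitrary; the environment variable takes the same number of bytes):
```py
>>> import datasets
>>> datasets.config.IN_MEMORY_MAX_SIZE = 20_000_000_000  # in bytes (~20 GB)
```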
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/about_map_batch.mdx | # Batch mapping
Combining the utility of [`Dataset.map`] with batch mode is very powerful. It allows you to speed up processing, and freely control the size of the generated dataset.
## Need for speed
The primary objective of batch mapping is to speed up processing. Often times, it is faster to work with batches of data instead of single examples. Naturally, batch mapping lends itself to tokenization. For example, the π€ [Tokenizers](https://huggingface.co/docs/tokenizers/python/latest/) library works faster with batches because it parallelizes the tokenization of all the examples in a batch.
## Input size != output size
The ability to control the size of the generated dataset can be leveraged for many interesting use-cases. In the How-to [map](#map) section, there are examples of using batch mapping to:
- Split long sentences into shorter chunks.
- Augment a dataset with additional tokens.
It is helpful to understand how this works, so you can come up with your own ways to use batch mapping. At this point, you may be wondering how you can control the size of the generated dataset. The answer is: **the mapped function does not have to return an output batch of the same size**.
In other words, your mapped function input can be a batch of size `N` and return a batch of size `M`. The output `M` can be greater than or less than `N`. This means you can concatenate your examples, divide them up, and even add more examples!
However, remember that all values in the output dictionary must contain the **same number of elements** as the other fields in the output dictionary. Otherwise, it is not possible to define the number of examples in the output returned by the mapped function. The number can vary between successive batches processed by the mapped function. For a single batch though, all values of the output dictionary should have the same length (i.e., the number of elements).
For example, from a dataset of 1 column and 3 rows, if you use `map` to return a new column with twice as many rows, then you will have an error.
In this case, you end up with one column with 3 rows, and one column with 6 rows. As you can see, the table will not be valid:
```py
>>> from datasets import Dataset
>>> dataset = Dataset.from_dict({"a": [0, 1, 2]})
>>> dataset.map(lambda batch: {"b": batch["a"] * 2}, batched=True) # new column with 6 elements: [0, 1, 2, 0, 1, 2]
'ArrowInvalid: Column 1 named b expected length 3 but got length 6'
```
To make it valid, you have to drop one of the columns:
```py
>>> from datasets import Dataset
>>> dataset = Dataset.from_dict({"a": [0, 1, 2]})
>>> dataset_with_duplicates = dataset.map(lambda batch: {"b": batch["a"] * 2}, remove_columns=["a"], batched=True)
>>> len(dataset_with_duplicates)
6
```
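As another illustration, here is a minimal sketch of a batched `map` whose output batch is larger than its input batch, using a hypothetical `chunk_examples` function that splits each sentence into fixed-size chunks:
```py
>>> from datasets import Dataset
>>> def chunk_examples(batch):
...     chunks = []
...     for sentence in batch["sentence"]:
...         # split each sentence into chunks of at most 50 characters
...         chunks += [sentence[i:i + 50] for i in range(0, len(sentence), 50)]
...     return {"chunks": chunks}
>>> dataset = Dataset.from_dict({"sentence": ["This is a rather long sentence that will be split into chunks."]})
>>> chunked_dataset = dataset.map(chunk_examples, batched=True, remove_columns=["sentence"])
>>> len(chunked_dataset)
2
```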
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/tabular_load.mdx | # Load tabular data
A tabular dataset is a generic dataset used to describe any data stored in rows and columns, where the rows represent an example and the columns represent a feature (can be continuous or categorical). These datasets are commonly stored in CSV files, Pandas DataFrames, and in database tables. This guide will show you how to load and create a tabular dataset from:
- CSV files
- Pandas DataFrames
- Databases
## CSV files
π€ Datasets can read CSV files by specifying the generic `csv` dataset builder name in the [`~datasets.load_dataset`] method. To load more than one CSV file, pass them as a list to the `data_files` parameter:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("csv", data_files="my_file.csv")
# load multiple CSV files
>>> dataset = load_dataset("csv", data_files=["my_file_1.csv", "my_file_2.csv", "my_file_3.csv"])
```
You can also map specific CSV files to the train and test splits:
```py
>>> dataset = load_dataset("csv", data_files={"train": ["my_train_file_1.csv", "my_train_file_2.csv"], "test": "my_test_file.csv"})
```
To load remote CSV files, pass the URLs instead:
```py
>>> base_url = "https://huggingface.co/datasets/lhoestq/demo1/resolve/main/data/"
>>> dataset = load_dataset('csv', data_files={"train": base_url + "train.csv", "test": base_url + "test.csv"})
```
To load zipped CSV files:
```py
>>> url = "https://domain.org/train_data.zip"
>>> data_files = {"train": url}
>>> dataset = load_dataset("csv", data_files=data_files)
```
## Pandas DataFrames
π€ Datasets also supports loading datasets from [Pandas DataFrames](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) with the [`~datasets.Dataset.from_pandas`] method:
```py
>>> from datasets import Dataset
>>> import pandas as pd
# create a Pandas DataFrame
>>> df = pd.read_csv("https://huggingface.co/datasets/imodels/credit-card/raw/main/train.csv")
>>> df = pd.DataFrame(df)
# load Dataset from Pandas DataFrame
>>> dataset = Dataset.from_pandas(df)
```
Use the `split` parameter to specify the name of the dataset split:
```py
>>> train_ds = Dataset.from_pandas(train_df, split="train")
>>> test_ds = Dataset.from_pandas(test_df, split="test")
```
If the dataset doesn't look as expected, you should explicitly [specify your dataset features](loading#specify-features). A [pandas.Series](https://pandas.pydata.org/docs/reference/api/pandas.Series.html) may not always carry enough information for Arrow to automatically infer a data type. For example, if a DataFrame is of length `0` or if the Series only contains `None/NaN` objects, the type is set to `null`.
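For example, here is a sketch of passing explicit features to [`~datasets.Dataset.from_pandas`] (the column and label names are hypothetical and should match your DataFrame):
```py
>>> from datasets import Dataset, Features, Value, ClassLabel
>>> features = Features({"text": Value("string"), "label": ClassLabel(names=["negative", "positive"])})
>>> dataset = Dataset.from_pandas(df, features=features)
```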
## Databases
Datasets stored in databases are typically accessed with SQL queries. With π€ Datasets, you can connect to a database, query for the data you need, and create a dataset out of it. Then you can use all the processing features of π€ Datasets to prepare your dataset for training.
### SQLite
SQLite is a small, lightweight database that is fast and easy to set up. You can use an existing database if you'd like, or follow along and start from scratch.
Start by creating a quick SQLite database with this [Covid-19 data](https://github.com/nytimes/covid-19-data/blob/master/us-states.csv) from the New York Times:
```py
>>> import sqlite3
>>> import pandas as pd
>>> conn = sqlite3.connect("us_covid_data.db")
>>> df = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv")
>>> df.to_sql("states", conn, if_exists="replace")
```
This creates a `states` table in the `us_covid_data.db` database which you can now load into a dataset.
To connect to the database, you'll need the [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) that identifies your database. Connecting to a database with a URI caches the returned dataset. The URI string differs for each database dialect, so be sure to check the [Database URLs](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) for whichever database you're using.
For SQLite, it is:
```py
>>> uri = "sqlite:///us_covid_data.db"
```
Load the table by passing the table name and URI to [`~datasets.Dataset.from_sql`]:
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_sql("states", uri)
>>> ds
Dataset({
features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'],
num_rows: 54382
})
```
Then you can use all of π€ Datasets process features like [`~datasets.Dataset.filter`] for example:
```py
>>> ds.filter(lambda x: x["state"] == "California")
```
You can also load a dataset from a SQL query instead of an entire table, which is useful for querying and joining multiple tables.
Load the dataset by passing your query and URI to [`~datasets.Dataset.from_sql`]:
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_sql('SELECT * FROM states WHERE state="California";', uri)
>>> ds
Dataset({
features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'],
num_rows: 1019
})
```
Then you can use all of π€ Datasets process features like [`~datasets.Dataset.filter`] for example:
```py
>>> ds.filter(lambda x: x["cases"] > 10000)
```
### PostgreSQL
You can also connect and load a dataset from a PostgreSQL database, however we won't directly demonstrate how in the documentation because the example is only meant to be run in a notebook. Instead, take a look at how to install and setup a PostgreSQL server in this [notebook](https://colab.research.google.com/github/nateraw/huggingface-hub-examples/blob/main/sql_with_huggingface_datasets.ipynb#scrollTo=d83yGQMPHGFi)!
After you've setup your PostgreSQL database, you can use the [`~datasets.Dataset.from_sql`] method to load a dataset from a table or query. | 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/image_process.mdx | # Process image data
This guide shows specific methods for processing image datasets. Learn how to:
- Use [`~Dataset.map`] with image dataset.
- Apply data augmentations to a dataset with [`~Dataset.set_transform`].
For a guide on how to process any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./process">general process guide</a>.
## Map
The [`~Dataset.map`] function can apply transforms over an entire dataset.
For example, create a basic [`Resize`](https://pytorch.org/vision/stable/generated/torchvision.transforms.Resize.html) function:
```py
>>> def transforms(examples):
... examples["pixel_values"] = [image.convert("RGB").resize((100,100)) for image in examples["image"]]
... return examples
```
Now use the [`~Dataset.map`] function to resize the entire dataset, and set `batched=True` to speed up the process by accepting batches of examples. The transform returns `pixel_values` as a cacheable `PIL.Image` object:
```py
>>> dataset = dataset.map(transforms, remove_columns=["image"], batched=True)
>>> dataset[0]
{'label': 6,
'pixel_values': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=100x100 at 0x7F058237BB10>}
```
The cache file saves time because you don't have to execute the same transform twice. The [`~Dataset.map`] function is best for operations you only run once per training - like resizing an image - instead of using it for operations executed for each epoch, like data augmentations.
[`~Dataset.map`] takes up some memory, but you can reduce its memory requirements with the following parameters:
- [`batch_size`](./package_reference/main_classes#datasets.DatasetDict.map.batch_size) determines the number of examples that are processed in one call to the transform function.
- [`writer_batch_size`](./package_reference/main_classes#datasets.DatasetDict.map.writer_batch_size) determines the number of processed examples that are kept in memory before they are stored away.
Both parameter values default to 1000, which can be expensive if you are storing images. Lower these values to use less memory when you use [`~Dataset.map`].
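For example, here is a sketch of the resize call from above with both values lowered; the exact numbers are illustrative, not a recommendation:
```py
>>> dataset = dataset.map(transforms, remove_columns=["image"], batched=True, batch_size=100, writer_batch_size=100)
```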
## Apply transforms
π€ Datasets applies data augmentations from any library or package to your dataset. Transforms can be applied on-the-fly on batches of data with [`~Dataset.set_transform`], which consumes less disk space.
<Tip>
The following example uses [torchvision](https://pytorch.org/vision/stable/index.html), but feel free to use other data augmentation libraries like [Albumentations](https://albumentations.ai/docs/), [Kornia](https://kornia.readthedocs.io/en/latest/), and [imgaug](https://imgaug.readthedocs.io/en/latest/).
</Tip>
For example, if you'd like to change the color properties of an image randomly:
```py
>>> from torchvision.transforms import Compose, ColorJitter, ToTensor
>>> jitter = Compose(
... [
... ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.7),
... ToTensor(),
... ]
... )
```
Create a function to apply the `ColorJitter` transform:
```py
>>> def transforms(examples):
... examples["pixel_values"] = [jitter(image.convert("RGB")) for image in examples["image"]]
... return examples
```
Apply the transform with the [`~Dataset.set_transform`] function:
```py
>>> dataset.set_transform(transforms)
``` | 0 |
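Accessing an example now applies the `ColorJitter` transform on the fly. For instance, assuming `dataset` is an image dataset with an `image` column (such as the `beans` dataset used elsewhere in these docs):
```py
>>> dataset[0]["pixel_values"].shape  # the jitter and ToTensor transforms run when the example is accessed
```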
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/upload_dataset.mdx | # Share a dataset to the Hub
The [Hub](https://huggingface.co/datasets) is home to an extensive collection of community-curated and popular research datasets. We encourage you to share your dataset to the Hub to help grow the ML community and accelerate progress for everyone. All contributions are welcome; adding a dataset is just a drag and drop away!
Start by [creating a Hugging Face Hub account](https://huggingface.co/join) if you don't have one yet.
## Upload with the Hub UI
The Hub's web-based interface allows users without any developer experience to upload a dataset.
### Create a repository
A repository hosts all your dataset files, including the revision history, making it possible to store more than one dataset version.
1. Click on your profile and select **New Dataset** to create a new dataset repository.
2. Pick a name for your dataset, and choose whether it is a public or private dataset. A public dataset is visible to anyone, whereas a private dataset can only be viewed by you or members of your organization.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/create_repo.png"/>
</div>
### Upload dataset
1. Once you've created a repository, navigate to the **Files and versions** tab to add a file. Select **Add file** to upload your dataset files. We support many text, audio, and image data extensions such as `.csv`, `.mp3`, and `.jpg` among many others. For text data extensions like `.csv`, `.json`, `.jsonl`, and `.txt`, we recommend compressing them before uploading to the Hub (to `.zip` or `.gz` file extension for example).
Text file extensions are not tracked by Git LFS by default, and if they're greater than 10MB, they will not be committed and uploaded. Take a look at the `.gitattributes` file in your repository for a complete list of tracked file extensions. For this tutorial, you can use the following sample `.csv` files since they're small: <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/train.csv" download>train.csv</a>, <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/test.csv" download>test.csv</a>.
<Tip warning={true}>
For additional dataset configuration options, like defining multiple configurations or enabling streaming, you'll need to write a dataset loading script. Check out how to write a dataset loading script for <a href="https://huggingface.co/docs/datasets/dataset_script#create-a-dataset-loading-script"><span class="underline decoration-green-400 decoration-2 font-semibold">text</span></a>, <a href="https://huggingface.co/docs/datasets/audio_dataset#loading-script"><span class="underline decoration-pink-400 decoration-2 font-semibold">audio</span></a>, and <a href="https://huggingface.co/docs/datasets/image_dataset#loading-script"><span class="underline decoration-yellow-400 decoration-2 font-semibold">image</span></a> datasets.
</Tip>
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/upload_files.png"/>
</div>
2. Drag and drop your dataset files and add a brief descriptive commit message.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/commit_files.png"/>
</div>
3. After uploading your dataset files, they are stored in your dataset repository.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/files_stored.png"/>
</div>
### Create a Dataset card
Adding a Dataset card is super valuable for helping users find your dataset and understand how to use it responsibly.
1. Click on **Create Dataset Card** to create a Dataset card. This button creates a `README.md` file in your repository.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/dataset_card.png"/>
</div>
2. At the top, you'll see the **Metadata UI** with several fields to select from like license, language, and task categories. These are the most important tags for helping users discover your dataset on the Hub. When you select an option from each field, they'll be automatically added to the top of the dataset card.
You can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1), which lists the complete (but not required) set of tag options like `annotations_creators`, to help you choose the appropriate tags.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/metadata_ui.png"/>
</div>
3. Click on the **Import dataset card template** link at the top of the editor to automatically create a dataset card template. Filling out the template is a great way to introduce your dataset to the community and help users understand how to use it. For a detailed example of what a good Dataset card should look like, take a look at the [CNN DailyMail Dataset card](https://huggingface.co/datasets/cnn_dailymail).
### Load dataset
Once your dataset is stored on the Hub, anyone can load it with the [`load_dataset`] function:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("stevhliu/demo")
```
## Upload with Python
Users who prefer to upload a dataset programmatically can use the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library. This library allows users to interact with the Hub from Python.
1. Begin by installing the library:
```bash
pip install huggingface_hub
```
2. To upload a dataset on the Hub in Python, you need to log in to your Hugging Face account:
```bash
huggingface-cli login
```
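If you're working in a notebook instead of a terminal, you can log in with `notebook_login` from the `huggingface_hub` library:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```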
3. Use the [`push_to_hub()`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.DatasetDict.push_to_hub) function to help you add, commit, and push a file to your repository:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("stevhliu/demo")
# dataset = dataset.map(...) # do all your processing here
>>> dataset.push_to_hub("stevhliu/processed_demo")
```
To set your dataset as private, set the `private` parameter to `True`. This parameter will only work if you are creating a repository for the first time.
```py
>>> dataset.push_to_hub("stevhliu/private_processed_demo", private=True)
```
### Privacy
A private dataset is only accessible by you. Similarly, if you share a dataset within your organization, then members of the organization can also access the dataset.
Load a private dataset by providing your authentication token to the `token` parameter:
```py
>>> from datasets import load_dataset
# Load a private individual dataset
>>> dataset = load_dataset("stevhliu/demo", token=True)
# Load a private organization dataset
>>> dataset = load_dataset("organization/dataset_name", token=True)
```
## What's next?
Congratulations, you've completed the tutorials! π₯³
From here, you can go on to:
- Learn more about how to use π€ Datasets other functions to [process your dataset](process).
- [Stream large datasets](stream) without downloading them locally.
- [Define your dataset splits and configurations](repository_structure) or [loading script](dataset_script) and share your dataset with the community.
If you have any questions about π€ Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/beam.mdx | # Beam Datasets
Some datasets are too large to be processed on a single machine. Instead, you can process them with [Apache Beam](https://beam.apache.org/), a library for parallel data processing. The processing pipeline is executed on a distributed processing backend such as [Apache Flink](https://flink.apache.org/), [Apache Spark](https://spark.apache.org/), or [Google Cloud Dataflow](https://cloud.google.com/dataflow).
We have already created Beam pipelines for some of the larger datasets like [wikipedia](https://huggingface.co/datasets/wikipedia), and [wiki40b](https://huggingface.co/datasets/wiki40b). You can load these normally with [`load_dataset`]. But if you want to run your own Beam pipeline with Dataflow, here is how:
1. Specify the dataset and configuration you want to process:
```bash
DATASET_NAME=your_dataset_name # ex: wikipedia
CONFIG_NAME=your_config_name # ex: 20220301.en
```
2. Input your Google Cloud Platform information:
```bash
PROJECT=your_project
BUCKET=your_bucket
REGION=your_region
```
3. Specify your Python requirements:
```bash
echo "datasets" > /tmp/beam_requirements.txt
echo "apache_beam" >> /tmp/beam_requirements.txt
```
4. Run the pipeline:
```bash
datasets-cli run_beam datasets/$DATASET_NAME \
--name $CONFIG_NAME \
--save_info \
--cache_dir gs://$BUCKET/cache/datasets \
--beam_pipeline_options=\
"runner=DataflowRunner,project=$PROJECT,job_name=$DATASET_NAME-gen,"\
"staging_location=gs://$BUCKET/binaries,temp_location=gs://$BUCKET/temp,"\
"region=$REGION,requirements_file=/tmp/beam_requirements.txt"
```
<Tip>
When you run your pipeline, you can adjust the parameters to change the runner (Flink or Spark), output location (S3 bucket or HDFS), and the number of workers.
</Tip>
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/how_to_metrics.mdx | # Metrics
<Tip warning={true}>
Metrics is deprecated in π€ Datasets. To learn more about how to use metrics, take a look at the library π€ [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
</Tip>
Metrics are important for evaluating a model's predictions. In the tutorial, you learned how to compute a metric over an entire evaluation set. You have also seen how to load a metric.
This guide will show you how to:
- Add predictions and references.
- Compute metrics using different methods.
- Write your own metric loading script.
## Add predictions and references
When you want to add model predictions and references to a [`Metric`] instance, you have two options:
- [`Metric.add`] adds a single `prediction` and `reference`.
- [`Metric.add_batch`] adds a batch of `predictions` and `references`.
Use [`Metric.add_batch`] by passing it your model predictions, and the references the model predictions should be evaluated against:
```py
>>> import datasets
>>> metric = datasets.load_metric('my_metric')
>>> for model_inputs, gold_references in evaluation_dataset:
... model_predictions = model(model_inputs)
... metric.add_batch(predictions=model_predictions, references=gold_references)
>>> final_score = metric.compute()
```
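For comparison, here is the same loop written with [`Metric.add`], which adds one prediction and reference pair at a time (using the same hypothetical `model` and `evaluation_dataset` as above):
```py
>>> for model_input, gold_reference in evaluation_dataset:
...     model_prediction = model(model_input)
...     metric.add(prediction=model_prediction, reference=gold_reference)
>>> final_score = metric.compute()
```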
<Tip>
Metrics accepts various input formats (Python lists, NumPy arrays, PyTorch tensors, etc.) and converts them to an appropriate format for storage and computation.
</Tip>
## Compute scores
The most straightforward way to calculate a metric is to call [`Metric.compute`]. But some metrics have additional arguments that allow you to modify the metric's behavior.
Let's load the [SacreBLEU](https://huggingface.co/metrics/sacrebleu) metric, and compute it with a different smoothing method.
1. Load the SacreBLEU metric:
```py
>>> import datasets
>>> metric = datasets.load_metric('sacrebleu')
```
2. Inspect the different argument methods for computing the metric:
```py
>>> print(metric.inputs_description)
Produces BLEU scores along with its sufficient statistics
from a source against one or more references.
Args:
predictions: The system stream (a sequence of segments).
references: A list of one or more reference streams (each a sequence of segments).
smooth_method: The smoothing method to use. (Default: 'exp').
smooth_value: The smoothing value. Only valid for 'floor' and 'add-k'. (Defaults: floor: 0.1, add-k: 1).
tokenize: Tokenization method to use for BLEU. If not provided, defaults to 'zh' for Chinese, 'ja-mecab' for Japanese and '13a' (mteval) otherwise.
lowercase: Lowercase the data. If True, enables case-insensitivity. (Default: False).
force: Insist that your tokenized input is actually detokenized.
...
```
3. Compute the metric with the `floor` method, and a different `smooth_value`:
```py
>>> score = metric.compute(smooth_method="floor", smooth_value=0.2)
```
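Here is a small self-contained sketch that passes the predictions and references directly to [`Metric.compute`] along with the smoothing arguments; the example sentences are made up:
```py
>>> predictions = ["the cat sat on the mat"]
>>> references = [["the cat sat on the mat"]]
>>> score = metric.compute(predictions=predictions, references=references, smooth_method="floor", smooth_value=0.2)
```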
<a id='metric_script'></a>
## Custom metric loading script
Write a metric loading script to use your own custom metric (or one that is not on the Hub). Then you can load it as usual with [`load_metric`].
To help you get started, open the [SQuAD metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/squad/squad.py) and follow along.
<Tip>
Get jump started with our metric loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_metric_script.py)!
</Tip>
### Add metric attributes
Start by adding some information about your metric in [`Metric._info`]. The most important attributes you should specify are:
1. [`MetricInfo.description`] provides a brief description about your metric.
2. [`MetricInfo.citation`] contains a BibTex citation for the metric.
3. [`MetricInfo.inputs_description`] describes the expected inputs and outputs. It may also provide an example usage of the metric.
4. [`MetricInfo.features`] defines the name and type of the predictions and references.
After you've filled out all these fields in the template, it should look like the following example from the SQuAD metric script:
```py
class Squad(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
},
}
),
codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
)
```
### Download metric files
If your metric needs to download, or retrieve local files, you will need to use the [`Metric._download_and_prepare`] method. For this example, let's examine the [BLEURT metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/bleurt/bleurt.py).
1. Provide a dictionary of URLs that point to the metric files:
```py
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
}
```
<Tip>
If the files are stored locally, provide a dictionary of path(s) instead of URLs.
</Tip>
2. [`Metric._download_and_prepare`] will take the URLs and download the metric files specified:
```py
def _download_and_prepare(self, dl_manager):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"Using default BLEURT-Base checkpoint for sequence maximum length 128. "
"You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
)
self.config_name = "bleurt-base-128"
if self.config_name not in CHECKPOINT_URLS.keys():
raise KeyError(
f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
)
# download the model checkpoint specified by self.config_name and set up the scorer
model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[self.config_name])
self.scorer = score.BleurtScorer(os.path.join(model_path, self.config_name))
```
### Compute score
[`Metric._compute`] provides the actual instructions for how to compute a metric given the predictions and references. Now let's take a look at the [GLUE metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/glue/glue.py).
1. Provide the functions for [`Metric._compute`] to calculate your metric:
```py
def simple_accuracy(preds, labels):
return (preds == labels).mean().item()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds).item()
return {
"accuracy": acc,
"f1": f1,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0].item()
spearman_corr = spearmanr(preds, labels)[0].item()
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
```
2. Create [`Metric._compute`] with instructions for what metric to calculate for each configuration:
```py
def _compute(self, predictions, references):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(references, predictions)}
elif self.config_name == "stsb":
return pearson_and_spearman(predictions, references)
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_f1(predictions, references)
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
)
```
### Test
Once you're finished writing your metric loading script, try to load it locally:
```py
>>> from datasets import load_metric
>>> metric = load_metric('PATH/TO/MY/SCRIPT.py')
```
| 0 |
hf_public_repos/datasets/docs | hf_public_repos/datasets/docs/source/about_dataset_features.mdx | # Dataset features
[`Features`] defines the internal structure of a dataset. It is used to specify the underlying serialization format. What's more interesting to you though is that [`Features`] contains high-level information about everything from the column names and types, to the [`ClassLabel`]. You can think of [`Features`] as the backbone of a dataset.
The [`Features`] format is simple: `dict[column_name, column_type]`. It is a dictionary of column name and column type pairs. The column type provides a wide range of options for describing the type of data you have.
Let's have a look at the features of the MRPC dataset from the GLUE benchmark:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('glue', 'mrpc', split='train')
>>> dataset.features
{'idx': Value(dtype='int32', id=None),
'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None),
'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
}
```
The [`Value`] feature tells π€ Datasets:
- The `idx` data type is `int32`.
- The `sentence1` and `sentence2` data types are `string`.
π€ Datasets supports many other data types such as `bool`, `float32` and `binary` to name just a few.
<Tip>
Refer to [`Value`] for a full list of supported data types.
</Tip>
The [`ClassLabel`] feature informs π€ Datasets the `label` column contains two classes. The classes are labeled `not_equivalent` and `equivalent`. Labels are stored as integers in the dataset. When you retrieve the labels, [`ClassLabel.int2str`] and [`ClassLabel.str2int`] carry out the conversion from integer value to label name, and vice versa.
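For example, with the MRPC dataset loaded above:
```py
>>> dataset.features["label"].int2str(0)
'not_equivalent'
>>> dataset.features["label"].str2int("equivalent")
1
```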
If your data type contains a list of objects, then you want to use the [`Sequence`] feature. Remember the SQuAD dataset?
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('squad', split='train')
>>> dataset.features
{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
```
The `answers` field is constructed using the [`Sequence`] feature because it contains two subfields, `text` and `answer_start`, which are lists of `string` and `int32`, respectively.
<Tip>
See the [flatten](./process#flatten) section to learn how you can extract the nested subfields as their own independent columns.
</Tip>
The array feature type is useful for creating arrays of various sizes. You can create arrays with two dimensions using [`Array2D`], and even arrays with five dimensions using [`Array5D`].
```py
>>> features = Features({'a': Array2D(shape=(1, 3), dtype='int32')})
```
The array type also allows the first dimension of the array to be dynamic. This is useful for handling sequences with variable lengths such as sentences, without having to pad or truncate the input to a uniform shape.
```py
>>> features = Features({'a': Array3D(shape=(None, 5, 2), dtype='int32')})
```
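As a quick sketch of what this looks like in practice, here is a dataset built from two made-up arrays whose first dimension differs:
```py
>>> import numpy as np
>>> from datasets import Dataset, Features, Array3D
>>> features = Features({'a': Array3D(shape=(None, 5, 2), dtype='int32')})
>>> ds = Dataset.from_dict({'a': [np.zeros((3, 5, 2), dtype='int32'), np.zeros((7, 5, 2), dtype='int32')]}, features=features)
```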
## Audio feature
Audio datasets have a column with type [`Audio`], which contains three important fields:
* `array`: the decoded audio data represented as a 1-dimensional array.
* `path`: the path to the downloaded audio file.
* `sampling_rate`: the sampling rate of the audio data.
When you load an audio dataset and call the audio column, the [`Audio`] feature automatically decodes and resamples the audio file:
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
>>> dataset[0]["audio"]
{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414,
0. , 0. ], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 8000}
```
<Tip warning={true}>
Index into an audio dataset using the row index first and then the `audio` column - `dataset[0]["audio"]` - to avoid decoding and resampling all the audio files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset.
</Tip>
With `decode=False`, the [`Audio`] type simply gives you the path or the bytes of the audio file, without decoding it into an `array`:
```py
>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train").cast_column("audio", Audio(decode=False))
>>> dataset[0]
{'audio': {'bytes': None,
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav'},
'english_transcription': 'I would like to set up a joint account with my partner',
'intent_class': 11,
'lang_id': 4,
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'transcription': 'I would like to set up a joint account with my partner'}
```
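To go back to automatic decoding, cast the column to [`Audio`] again; you can also pick a different sampling rate to resample the audio on access (16 kHz here is just an example value):
```py
>>> from datasets import Audio
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
```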
## Image feature
Image datasets have a column with type [`Image`], which loads `PIL.Image` objects from images stored as bytes.
When you load an image dataset and call the image column, the [`Image`] feature automatically decodes the image file:
```py
>>> from datasets import load_dataset, Image
>>> dataset = load_dataset("beans", split="train")
>>> dataset[0]["image"]
<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x125506CF8>
```
<Tip warning={true}>
Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding all the image files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset.
</Tip>
With `decode=False`, the [`Image`] type simply gives you the path or the bytes of the image file, without decoding it into a `PIL.Image`:
```py
>>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False))
>>> dataset[0]["image"]
{'bytes': None,
'path': '/Users/username/.cache/huggingface/datasets/downloads/extracted/772e7c1fba622cff102b85dd74bcce46e8168634df4eaade7bedd3b8d91d3cd7/train/healthy/healthy_train.265.jpg'}
```
Depending on the dataset, you may get the path to the local downloaded image, or the content of the image as bytes if the dataset is not made of individual files.
You can also define a dataset of images from numpy arrays:
```python
>>> import numpy as np
>>> from datasets import Dataset, Features, Image
>>> ds = Dataset.from_dict({"i": [np.zeros(shape=(16, 16, 3), dtype=np.uint8)]}, features=Features({"i": Image()}))
```
And in this case the numpy arrays are encoded into PNG (or TIFF if the pixel values' precision is important).
For multi-channel arrays like RGB or RGBA, only uint8 is supported. If you use a larger precision, you get a warning and the array is downcasted to uint8.
For gray-scale images you can use the integer or float precision you want as long as it is compatible with `Pillow`. A warning is shown if your image integer or float precision is too high, and in this case the array is downcasted: an int64 array is downcasted to int32, and a float64 array is downcasted to float32.
| 0 |
hf_public_repos/datasets/docs/source | hf_public_repos/datasets/docs/source/package_reference/task_templates.mdx | # Task templates
<Tip warning={true}>
The Task API is deprecated in favor of [`train-eval-index`](https://github.com/huggingface/hub-docs/blob/9ab2555e1c146122056aba6f89af404a8bc9a6f1/datasetcard.md?plain=1#L90-L106) and will be removed in the next major release.
</Tip>
The tasks supported by [`Dataset.prepare_for_task`] and [`DatasetDict.prepare_for_task`].
[[autodoc]] datasets.tasks.AutomaticSpeechRecognition
[[autodoc]] datasets.tasks.AudioClassification
[[autodoc]] datasets.tasks.ImageClassification
- align_with_features
[[autodoc]] datasets.tasks.LanguageModeling
[[autodoc]] datasets.tasks.QuestionAnsweringExtractive
[[autodoc]] datasets.tasks.Summarization
[[autodoc]] datasets.tasks.TextClassification
- align_with_features
| 0 |
hf_public_repos/datasets/docs/source | hf_public_repos/datasets/docs/source/package_reference/logging_methods.mdx | # Logging methods
π€ Datasets strives to be transparent and explicit about how it works, but this can be quite verbose at times. We have included a series of logging methods which allow you to easily adjust the level of verbosity of the entire library. Currently the default verbosity of the library is set to `WARNING`.
To change the level of verbosity, use one of the direct setters. For instance, here is how to change the verbosity to the `INFO` level:
```py
import datasets
datasets.logging.set_verbosity_info()
```
You can also use the environment variable `DATASETS_VERBOSITY` to override the default verbosity, and set it to one of the following: `debug`, `info`, `warning`, `error`, `critical`:
```bash
DATASETS_VERBOSITY=error ./myprogram.py
```
All the methods of this logging module are documented below. The main ones are:
- [`logging.get_verbosity`] to get the current level of verbosity in the logger
- [`logging.set_verbosity`] to set the verbosity to the level of your choice
In order from the least to the most verbose (with their corresponding `int` values):
1. `logging.CRITICAL` or `logging.FATAL` (int value, 50): only reports the most critical errors.
2. `logging.ERROR` (int value, 40): only reports errors.
3. `logging.WARNING` or `logging.WARN` (int value, 30): only reports errors and warnings. This is the default level used by the library.
4. `logging.INFO` (int value, 20): reports errors, warnings, and basic information.
5. `logging.DEBUG` (int value, 10): reports all information.
By default, `tqdm` progress bars will be displayed during dataset download and preprocessing. [`logging.disable_progress_bar`] and [`logging.enable_progress_bar`] can be used to suppress or unsuppress this behavior.
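For example, to toggle the progress bars:
```py
import datasets

datasets.logging.disable_progress_bar()  # hide tqdm progress bars
datasets.logging.enable_progress_bar()   # show them again
```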
## Functions
[[autodoc]] datasets.logging.get_verbosity
[[autodoc]] datasets.logging.set_verbosity
[[autodoc]] datasets.logging.set_verbosity_info
[[autodoc]] datasets.logging.set_verbosity_warning
[[autodoc]] datasets.logging.set_verbosity_debug
[[autodoc]] datasets.logging.set_verbosity_error
[[autodoc]] datasets.logging.disable_propagation
[[autodoc]] datasets.logging.enable_propagation
[[autodoc]] datasets.logging.get_logger
[[autodoc]] datasets.logging.enable_progress_bar
[[autodoc]] datasets.logging.disable_progress_bar
[[autodoc]] datasets.is_progress_bar_enabled
## Levels
### datasets.logging.CRITICAL
datasets.logging.CRITICAL = 50
### datasets.logging.DEBUG
datasets.logging.DEBUG = 10
### datasets.logging.ERROR
datasets.logging.ERROR = 40
### datasets.logging.FATAL
datasets.logging.FATAL = 50
### datasets.logging.INFO
datasets.logging.INFO = 20
### datasets.logging.NOTSET
datasets.logging.NOTSET = 0
### datasets.logging.WARN
datasets.logging.WARN = 30
### datasets.logging.WARNING
datasets.logging.WARNING = 30
| 0 |
hf_public_repos/datasets/docs/source | hf_public_repos/datasets/docs/source/package_reference/main_classes.mdx | # Main classes
## DatasetInfo
[[autodoc]] datasets.DatasetInfo
## Dataset
The base class [`Dataset`] implements a Dataset backed by an Apache Arrow table.
[[autodoc]] datasets.Dataset
- add_column
- add_item
- from_file
- from_buffer
- from_pandas
- from_dict
- from_generator
- data
- cache_files
- num_columns
- num_rows
- column_names
- shape
- unique
- flatten
- cast
- cast_column
- remove_columns
- rename_column
- rename_columns
- select_columns
- class_encode_column
- __len__
- __iter__
- iter
- formatted_as
- set_format
- set_transform
- reset_format
- with_format
- with_transform
- __getitem__
- cleanup_cache_files
- map
- filter
- select
- sort
- shuffle
- train_test_split
- shard
- to_tf_dataset
- push_to_hub
- save_to_disk
- load_from_disk
- flatten_indices
- to_csv
- to_pandas
- to_dict
- to_json
- to_parquet
- to_sql
- add_faiss_index
- add_faiss_index_from_external_arrays
- save_faiss_index
- load_faiss_index
- add_elasticsearch_index
- load_elasticsearch_index
- list_indexes
- get_index
- drop_index
- search
- search_batch
- get_nearest_examples
- get_nearest_examples_batch
- info
- split
- builder_name
- citation
- config_name
- dataset_size
- description
- download_checksums
- download_size
- features
- homepage
- license
- size_in_bytes
- supervised_keys
- version
- from_csv
- from_json
- from_parquet
- from_text
- from_sql
- prepare_for_task
- align_labels_with_mapping
[[autodoc]] datasets.concatenate_datasets
[[autodoc]] datasets.interleave_datasets
[[autodoc]] datasets.distributed.split_dataset_by_node
[[autodoc]] datasets.enable_caching
[[autodoc]] datasets.disable_caching
[[autodoc]] datasets.is_caching_enabled
## DatasetDict
Dictionary with split names as keys ('train', 'test' for example), and `Dataset` objects as values.
It also has dataset transform methods like map or filter, to process all the splits at once.
[[autodoc]] datasets.DatasetDict
- data
- cache_files
- num_columns
- num_rows
- column_names
- shape
- unique
- cleanup_cache_files
- map
- filter
- sort
- shuffle
- set_format
- reset_format
- formatted_as
- with_format
- with_transform
- flatten
- cast
- cast_column
- remove_columns
- rename_column
- rename_columns
- select_columns
- class_encode_column
- push_to_hub
- save_to_disk
- load_from_disk
- from_csv
- from_json
- from_parquet
- from_text
- prepare_for_task
<a id='package_reference_features'></a>
## IterableDataset
The base class [`IterableDataset`] implements an iterable Dataset backed by python generators.
[[autodoc]] datasets.IterableDataset
- from_generator
- remove_columns
- select_columns
- cast_column
- cast
- __iter__
- iter
- map
- rename_column
- filter
- shuffle
- skip
- take
- info
- split
- builder_name
- citation
- config_name
- dataset_size
- description
- download_checksums
- download_size
- features
- homepage
- license
- size_in_bytes
- supervised_keys
- version
## IterableDatasetDict
Dictionary with split names as keys ('train', 'test' for example), and `IterableDataset` objects as values.
[[autodoc]] datasets.IterableDatasetDict
- map
- filter
- shuffle
- with_format
- cast
- cast_column
- remove_columns
- rename_column
- rename_columns
- select_columns
## Features
[[autodoc]] datasets.Features
[[autodoc]] datasets.Sequence
[[autodoc]] datasets.ClassLabel
[[autodoc]] datasets.Value
[[autodoc]] datasets.Translation
[[autodoc]] datasets.TranslationVariableLanguages
[[autodoc]] datasets.Array2D
[[autodoc]] datasets.Array3D
[[autodoc]] datasets.Array4D
[[autodoc]] datasets.Array5D
[[autodoc]] datasets.Audio
[[autodoc]] datasets.Image
## MetricInfo
[[autodoc]] datasets.MetricInfo
## Metric
The base class `Metric` implements a Metric backed by one or several [`Dataset`].
[[autodoc]] datasets.Metric
## Filesystems
[[autodoc]] datasets.filesystems.S3FileSystem
[[autodoc]] datasets.filesystems.extract_path_from_uri
[[autodoc]] datasets.filesystems.is_remote_filesystem
## Fingerprint
[[autodoc]] datasets.fingerprint.Hasher
| 0 |
hf_public_repos/datasets/docs/source | hf_public_repos/datasets/docs/source/package_reference/loading_methods.mdx | # Loading methods
Methods for listing and loading datasets and metrics:
## Datasets
[[autodoc]] datasets.list_datasets
[[autodoc]] datasets.load_dataset
[[autodoc]] datasets.load_from_disk
[[autodoc]] datasets.load_dataset_builder
[[autodoc]] datasets.get_dataset_config_names
[[autodoc]] datasets.get_dataset_infos
[[autodoc]] datasets.get_dataset_split_names
[[autodoc]] datasets.inspect_dataset
## Metrics
<Tip warning={true}>
Metrics is deprecated in π€ Datasets. To learn more about how to use metrics, take a look at the library π€ [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
</Tip>
[[autodoc]] datasets.list_metrics
[[autodoc]] datasets.load_metric
[[autodoc]] datasets.inspect_metric
## From files
Configurations used to load data files.
They are used when loading local files or a dataset repository:
- local files: `load_dataset("parquet", data_dir="path/to/data/dir")`
- dataset repository: `load_dataset("allenai/c4")`
You can pass arguments to `load_dataset` to configure data loading.
For example, you can specify the `sep` parameter to define the [`~datasets.packaged_modules.csv.CsvConfig`] that is used to load the data:
```python
load_dataset("csv", data_dir="path/to/data/dir", sep="\t")
```
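Similarly, a sketch of passing the `field` parameter to the JSON loader, assuming the records are nested under a top-level `"data"` key in each file:
```python
load_dataset("json", data_dir="path/to/data/dir", field="data")
```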
### Text
[[autodoc]] datasets.packaged_modules.text.TextConfig
[[autodoc]] datasets.packaged_modules.text.Text
### CSV
[[autodoc]] datasets.packaged_modules.csv.CsvConfig
[[autodoc]] datasets.packaged_modules.csv.Csv
### JSON
[[autodoc]] datasets.packaged_modules.json.JsonConfig
[[autodoc]] datasets.packaged_modules.json.Json
### Parquet
[[autodoc]] datasets.packaged_modules.parquet.ParquetConfig
[[autodoc]] datasets.packaged_modules.parquet.Parquet
### Arrow
[[autodoc]] datasets.packaged_modules.arrow.ArrowConfig
[[autodoc]] datasets.packaged_modules.arrow.Arrow
### SQL
[[autodoc]] datasets.packaged_modules.sql.SqlConfig
[[autodoc]] datasets.packaged_modules.sql.Sql
### Images
[[autodoc]] datasets.packaged_modules.imagefolder.ImageFolderConfig
[[autodoc]] datasets.packaged_modules.imagefolder.ImageFolder
### Audio
[[autodoc]] datasets.packaged_modules.audiofolder.AudioFolderConfig
[[autodoc]] datasets.packaged_modules.audiofolder.AudioFolder
| 0 |
hf_public_repos/datasets/docs/source | hf_public_repos/datasets/docs/source/package_reference/table_classes.mdx | # Table Classes
Each `Dataset` object is backed by a PyArrow Table.
A Table can either be loaded from the disk (memory mapped) or kept in memory.
Several Table types are available, and they all inherit from [`table.Table`].
## Table
[[autodoc]] datasets.table.Table
- validate
- equals
- to_batches
- to_pydict
- to_pandas
- to_string
- field
- column
- itercolumns
- schema
- columns
- num_columns
- num_rows
- shape
- nbytes
## InMemoryTable
[[autodoc]] datasets.table.InMemoryTable
- validate
- equals
- to_batches
- to_pydict
- to_pandas
- to_string
- field
- column
- itercolumns
- schema
- columns
- num_columns
- num_rows
- shape
- nbytes
- column_names
- slice
- filter
- flatten
- combine_chunks
- cast
- replace_schema_metadata
- add_column
- append_column
- remove_column
- set_column
- rename_columns
- select
- drop
- from_file
- from_buffer
- from_pandas
- from_arrays
- from_pydict
- from_batches
## MemoryMappedTable
[[autodoc]] datasets.table.MemoryMappedTable
- validate
- equals
- to_batches
- to_pydict
- to_pandas
- to_string
- field
- column
- itercolumns
- schema
- columns
- num_columns
- num_rows
- shape
- nbytes
- column_names
- slice
- filter
- flatten
- combine_chunks
- cast
- replace_schema_metadata
- add_column
- append_column
- remove_column
- set_column
- rename_columns
- select
- drop
- from_file
## ConcatenationTable
[[autodoc]] datasets.table.ConcatenationTable
- validate
- equals
- to_batches
- to_pydict
- to_pandas
- to_string
- field
- column
- itercolumns
- schema
- columns
- num_columns
- num_rows
- shape
- nbytes
- column_names
- slice
- filter
- flatten
- combine_chunks
- cast
- replace_schema_metadata
- add_column
- append_column
- remove_column
- set_column
- rename_columns
- select
- drop
- from_blocks
- from_tables
## Utils
[[autodoc]] datasets.table.concat_tables
[[autodoc]] datasets.table.list_table_cache_files
| 0 |
hf_public_repos/datasets/docs/source | hf_public_repos/datasets/docs/source/package_reference/builder_classes.mdx | # Builder classes
## Builders
π€ Datasets relies on two main classes during the dataset building process: [`DatasetBuilder`] and [`BuilderConfig`].
[[autodoc]] datasets.DatasetBuilder
[[autodoc]] datasets.GeneratorBasedBuilder
[[autodoc]] datasets.BeamBasedBuilder
[[autodoc]] datasets.ArrowBasedBuilder
[[autodoc]] datasets.BuilderConfig
## Download
[[autodoc]] datasets.DownloadManager
[[autodoc]] datasets.StreamingDownloadManager
[[autodoc]] datasets.DownloadConfig
[[autodoc]] datasets.DownloadMode
## Verification
[[autodoc]] datasets.VerificationMode
## Splits
[[autodoc]] datasets.SplitGenerator
[[autodoc]] datasets.Split
[[autodoc]] datasets.NamedSplit
[[autodoc]] datasets.NamedSplitAll
[[autodoc]] datasets.ReadInstruction
## Version
[[autodoc]] datasets.utils.Version
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/templates/README.md | ---
TODO: Add YAML tags here. Copy-paste the tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging
---
# Dataset Card for [Dataset Name]
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
[More Information Needed]
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/templates/new_dataset_script.py | # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""
import csv
import json
import os
import datasets
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
"first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
"second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
}
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class NewDataset(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("1.1.0")
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
# If you need to make complex sub-parts in the datasets with configurable options
# You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
# BUILDER_CONFIG_CLASS = MyBuilderConfig
# You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('my_dataset', 'first_domain')
# data = datasets.load_dataset('my_dataset', 'second_domain')
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
]
    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
if self.config.name == "first_domain": # This is the name of the configuration selected in BUILDER_CONFIGS above
features = datasets.Features(
{
"sentence": datasets.Value("string"),
"option1": datasets.Value("string"),
"answer": datasets.Value("string")
# These are the features of your dataset like images, labels ...
}
)
else: # This is an example to show how to have different features for "first_domain" and "second_domain"
features = datasets.Features(
{
"sentence": datasets.Value("string"),
"option2": datasets.Value("string"),
"second_domain_answer": datasets.Value("string")
# These are the features of your dataset like images, labels ...
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
# TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
urls = _URLS[self.config.name]
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "train.jsonl"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "dev.jsonl"),
"split": "dev",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "test.jsonl"),
"split": "test"
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
# TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
with open(filepath, encoding="utf-8") as f:
for key, row in enumerate(f):
data = json.loads(row)
if self.config.name == "first_domain":
# Yields examples as (key, example) tuples
yield key, {
"sentence": data["sentence"],
"option1": data["option1"],
"answer": "" if split == "test" else data["answer"],
}
else:
yield key, {
"sentence": data["sentence"],
"option2": data["option2"],
"second_domain_answer": "" if split == "test" else data["second_domain_answer"],
}
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/templates/README_guide.md | ---
YAML tags (full spec here: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1):
- copy-paste the tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging
---
# Dataset Card Creation Guide
## Table of Contents
- [Dataset Card Creation Guide](#dataset-card-creation-guide)
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
- [Who are the source language producers?](#who-are-the-source-language-producers)
- [Annotations](#annotations)
- [Annotation process](#annotation-process)
- [Who are the annotators?](#who-are-the-annotators)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [Add homepage URL here if available (unless it's a GitHub repository)]()
- **Repository:** [If the dataset is hosted on github or has a github homepage, add URL here]()
- **Paper:** [If the dataset was introduced by a paper or there was a paper written describing the dataset, add URL here (landing page for Arxiv paper preferred)]()
- **Leaderboard:** [If the dataset supports an active leaderboard, add link here]()
- **Point of Contact:** [If known, name and email of at least one person the reader can contact for questions about the dataset.]()
### Dataset Summary
Briefly summarize the dataset, its intended use and the supported tasks. Give an overview of how and why the dataset was created. The summary should explicitly mention the languages present in the dataset (possibly in broad terms, e.g. *translations between several pairs of European languages*), and describe the domain, topic, or genre covered.
### Supported Tasks and Leaderboards
For each of the tasks tagged for this dataset, give a brief description of the tag, metrics, and suggested models (with a link to their HuggingFace implementation if available). Give a similar description of tasks that were not covered by the structured tag set (replace the `task-category-tag` with an appropriate `other:other-task-name`).
- `task-category-tag`: The dataset can be used to train a model for [TASK NAME], which consists in [TASK DESCRIPTION]. Success on this task is typically measured by achieving a *high/low* [metric name](https://huggingface.co/metrics/metric_name). The ([model name](https://huggingface.co/model_name) or [model class](https://huggingface.co/transformers/model_doc/model_class.html)) model currently achieves the following score. *[IF A LEADERBOARD IS AVAILABLE]:* This task has an active leaderboard which can be found at [leaderboard url]() and ranks models based on [metric name](https://huggingface.co/metrics/metric_name) while also reporting [other metric name](https://huggingface.co/metrics/other_metric_name).
### Languages
Provide a brief overview of the languages represented in the dataset. Describe relevant details about specifics of the language such as whether it is social media text, African American English,...
When relevant, please provide [BCP-47 codes](https://tools.ietf.org/html/bcp47), which consist of a [primary language subtag](https://tools.ietf.org/html/bcp47#section-2.2.1), with a [script subtag](https://tools.ietf.org/html/bcp47#section-2.2.3) and/or [region subtag](https://tools.ietf.org/html/bcp47#section-2.2.4) if available.
## Dataset Structure
### Data Instances
Provide a JSON-formatted example and brief description of a typical instance in the dataset. If available, provide a link to further examples.
```
{
'example_field': ...,
...
}
```
Provide any additional information that is not covered in the other sections about the data here. In particular describe any relationships between data points and if these relationships are made explicit.
### Data Fields
List and describe the fields present in the dataset. Mention their data type, and whether they are used as input or output in any of the tasks the dataset currently supports. If the data has span indices, describe their attributes, such as whether they are at the character level or word level, whether they are contiguous or not, etc. If the dataset contains example IDs, state whether they have an inherent meaning, such as a mapping to other datasets or pointing to relationships between data points.
- `example_field`: description of `example_field`
Note that the descriptions can be initialized with the **Show Markdown Data Fields** output of the [Datasets Tagging app](https://huggingface.co/spaces/huggingface/datasets-tagging); you will then only need to refine the generated descriptions.
### Data Splits
Describe and name the splits in the dataset if there are more than one.
Describe any criteria for splitting the data, if used. If there are differences between the splits (e.g. if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here.
Provide the sizes of each split. As appropriate, provide any descriptive statistics for the features, such as average length. For example:
| | train | validation | test |
|-------------------------|------:|-----------:|-----:|
| Input Sentences | | | |
| Average Sentence Length | | | |
## Dataset Creation
### Curation Rationale
What need motivated the creation of this dataset? What are some of the reasons underlying the major choices involved in putting it together?
### Source Data
This section describes the source data (e.g. news text and headlines, social media posts, translated sentences,...)
#### Initial Data Collection and Normalization
Describe the data collection process. Describe any criteria for data selection or filtering. List any key words or search terms used. If possible, include runtime information for the collection process.
If data was collected from other pre-existing datasets, link to source here and to their [Hugging Face version](https://huggingface.co/datasets/dataset_name).
If the data was modified or normalized after being collected (e.g. if the data is word-tokenized), describe the process and the tools used.
#### Who are the source language producers?
State whether the data was produced by humans or machine generated. Describe the people or systems who originally created the data.
If available, include self-reported demographic or identity information for the source data creators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender.
Describe the conditions under which the data was created (for example, if the producers were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.
Describe other people represented or mentioned in the data. Where possible, link to references for the information.
### Annotations
If the dataset contains annotations which are not part of the initial data collection, describe them in the following paragraphs.
#### Annotation process
If applicable, describe the annotation process and any tools used, or state that this does not apply. Describe the amount of data annotated, if not all of it. Describe or reference annotation guidelines provided to the annotators. If available, provide inter-annotator statistics. Describe any annotation validation processes.
#### Who are the annotators?
If annotations were collected for the source data (such as class labels or syntactic parses), state whether the annotations were produced by humans or machine generated.
Describe the people or systems who originally created the annotations and their selection criteria if applicable.
If available, include self-reported demographic or identity information for the annotators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for guidance on using identity categories as variables, particularly gender.
Describe the conditions under which the data was annotated (for example, if the annotators were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.
### Personal and Sensitive Information
State whether the dataset uses identity categories and, if so, how the information is used. Describe where this information comes from (e.g. self-reporting, collecting from profiles, inferring, etc.). See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for guidance on using identity categories as variables, particularly gender. State whether the data is linked to individuals and whether those individuals can be identified in the dataset, either directly or indirectly (i.e., in combination with other data).
State whether the dataset contains other data that might be considered sensitive (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history).
If efforts were made to anonymize the data, describe the anonymization process.
## Considerations for Using the Data
### Social Impact of Dataset
Please discuss some of the ways you believe the use of this dataset will impact society.
The statement should both present positive outlooks, such as outlining how technologies developed through its use may improve people's lives, and discuss the accompanying risks. These risks may range from making important decisions more opaque to the people affected by the technology, to reinforcing existing harmful biases (whose specifics should be discussed in the next section), among other considerations.
Also describe in this section if the proposed dataset contains a low-resource or under-represented language. If this is the case or if this task has any impact on underserved communities, please elaborate here.
### Discussion of Biases
Provide descriptions of specific biases that are likely to be reflected in the data, and state whether any steps were taken to reduce their impact.
For Wikipedia text, see for example [Dinan et al 2020 on biases in Wikipedia (esp. Table 1)](https://arxiv.org/abs/2005.00614), or [Blodgett et al 2020](https://www.aclweb.org/anthology/2020.acl-main.485/) for a more general discussion of the topic.
If analyses have been run quantifying these biases, please add brief summaries and links to the studies here.
### Other Known Limitations
If studies of the dataset have outlined other limitations, such as annotation artifacts, please outline and cite them here.
## Additional Information
### Dataset Curators
List the people involved in collecting the dataset and their affiliation(s). If funding information is known, include it here.
### Licensing Information
Provide the license and link to the license webpage if available.
### Citation Information
Provide the [BibTeX](http://www.bibtex.org/)-formatted reference for the dataset. For example:
```
@article{article_id,
author = {Author List},
title = {Dataset Paper Title},
journal = {Publication Venue},
year = {2525}
}
```
If the dataset has a [DOI](https://www.doi.org/), please provide it here.
### Contributions
Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
| 0 |
hf_public_repos/datasets | hf_public_repos/datasets/templates/metric_card_template.md | # Metric Card for *Current Metric*
***Metric Card Instructions:*** *Copy this file into the relevant metric folder, then fill it out and save it as README.md. Feel free to take a look at existing metric cards if you'd like examples.*
## Metric Description
*Give a brief overview of this metric.*
## How to Use
*Give a general statement of how to use the metric.*
*Provide the simplest possible example of using the metric.*
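*For instance, a minimal usage sketch might look like the following (the metric name `my_metric` and its score key are placeholders, not a real metric in this repository):*
```
import datasets

# Placeholder name: replace "my_metric" with the actual metric folder name.
metric = datasets.load_metric("my_metric")
results = metric.compute(predictions=[0, 1, 1], references=[0, 1, 0])
print(results)  # e.g. {'my_score': 0.66}
```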
### Inputs
*List all input arguments in the format below*
- **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
### Output Values
*Explain what this metric outputs (e.g. a single score, a list of scores)*
*Give an example of what the metric output looks like.*
*State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*
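*For example, `compute` typically returns a dictionary mapping score names to values; the key and value below are placeholders:*
```
{'my_score': 0.66}
```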
#### Values from Popular Papers
*Give examples, preferably with links, to papers that have reported this metric, along with the values they have reported.*
### Examples
*Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
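*As an illustrative sketch (again assuming a placeholder metric called `my_metric`), such examples could contrast typical and extreme inputs:*
```
import datasets

metric = datasets.load_metric("my_metric")  # placeholder name

# Predictions that perfectly match the references.
print(metric.compute(predictions=[0, 1, 1], references=[0, 1, 1]))

# Predictions that do not match the references at all.
print(metric.compute(predictions=[1, 0, 0], references=[0, 1, 1]))
```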
## Limitations and Bias
*Note any known limitations or biases that the metric has, with links and references if possible.*
## Citation
*Cite the source where this metric was introduced.*
## Further References
*Add any useful further references.*
| 0 |