from datetime import date
from pathlib import Path
from typing import Any, Literal

import pytest
from datasets import load_dataset
from pydantic import AfterValidator, BaseModel, BeforeValidator
from typing_extensions import Annotated

from .conftest import DATASET_NAMES
from .readme_parsing import get_tag_idx, read_frontmatter_and_body
def ensure_tuple(created: str | tuple) -> tuple:
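    """Normalize the created field: a "start, end" string is split into a tuple before date parsing."""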
    if isinstance(created, str):
        return tuple(created.split(", "))
    return created
def validate_sample_metadata(metadata: dict[str, Any]) -> dict[str, Any]:
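    """Validate that the sample metadata contains the required keys (currently only 'source-pretty')."""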
if "source-pretty" not in metadata:
raise ValueError("'source-pretty' should be in metadata dict.")
return metadata
class SampleSchema(BaseModel):
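    """Expected schema for a single sample in a dataset."""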
    text: str
    source: str
    id: str
    added: date  # date.fromisoformat
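    # the raw value may be a single "start, end" string; ensure_tuple splits it before date parsing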
    created: Annotated[tuple[date, date], BeforeValidator(ensure_tuple)]
    license: str  # TODO: should probably be a literal
    domain: str  # TODO: convert to literal
    metadata: Annotated[dict[str, Any], AfterValidator(validate_sample_metadata)]
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_sample_schema(repo_path: Path, dataset_name: str):
"""Ensure that the dataset samples follow the correct schema"""
    ds = load_dataset(
        str(repo_path.resolve()), dataset_name, split="train", streaming=True
    )
    sample = next(iter(ds))
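    # constructing the model raises a pydantic ValidationError if the sample does not match the schema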
    SampleSchema(**sample)
class FrontmatterSchema(BaseModel):
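    """Expected schema for the YAML frontmatter of the dataset's markdown (README) file."""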
    pretty_name: str
    language: list[Literal["da"]]
    license: Literal["cc0-1.0", "other", "cc-by-sa-4.0"]
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_dataset_readme(repo_path: Path, dataset_name: str):
"""tests that the dataset frontmatter and markdown follows the correct format."""
    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
    frontmatter, body = read_frontmatter_and_body(readme)
    frontmatter_validated = FrontmatterSchema(**frontmatter)
    # ensure that the required tags are present in the body
    tags = ["SHORT DESCRIPTION", "DESC-STATS", "DATASET PLOTS", "SAMPLE"]
    for tag in tags:
        get_tag_idx(body, tag)
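    # collect all level-2 markdown headings so required sections can be checked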
    h2_headings = {line for line in body.splitlines() if line.startswith("## ")}
    if frontmatter_validated.license == "other":
        # an "other" license must be described in a dedicated section
        assert "## License Information" in h2_headings
    # required headings
    req_h2_headings = ["## Dataset Description", "## Additional Information"]
    for req_h2 in req_h2_headings:
        assert req_h2 in h2_headings
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_dataset_folder_structure(repo_path: Path, dataset_name: str):
"""tests that the dataset folder structure is as follows.
dataset_name
|- dataset_name.md
|- dataset_name.parquet
If there is a python file, there should at least be one called `create.py`, but there can be additional.
"""
    path = repo_path / "data" / dataset_name
    assert (path / f"{path.name}.parquet").exists()
    assert (path / f"{path.name}.md").exists()
    if any(p.name.endswith(".py") for p in path.glob("*")):
        assert (path / "create.py").exists()