Dataset columns (type and observed range):

url: string (length 61 to 61)
repository_url: string (1 class)
labels_url: string (length 75 to 75)
comments_url: string (length 70 to 70)
events_url: string (length 68 to 68)
html_url: string (length 49 to 51)
id: int64 (942M to 3.2B)
node_id: string (length 18 to 32)
number: int64 (2.63k to 7.67k)
title: string (length 1 to 290)
user: dict
labels: list (length 0 to 4)
state: string (2 classes)
locked: bool (1 class)
assignee: dict
assignees: list (length 0 to 4)
milestone: dict
comments: int64 (0 to 49)
created_at: date string (2021-07-12 19:58:31 to 2025-07-03 11:24:15)
updated_at: date string (2021-07-13 05:45:26 to 2025-07-03 18:34:32)
closed_at: string (length 20 to 20)
author_association: string (4 classes)
type: null
active_lock_reason: null
sub_issues_summary: dict
body: string (length 0 to 58.6k)
closed_by: dict
reactions: dict
timeline_url: string (length 70 to 70)
performed_via_github_app: null
state_reason: string (4 classes)
draft: bool (2 classes)
pull_request: dict
https://api.github.com/repos/huggingface/datasets/issues/7463
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7463/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7463/comments
https://api.github.com/repos/huggingface/datasets/issues/7463/events
https://github.com/huggingface/datasets/pull/7463
2,925,924,452
PR_kwDODunzps6O-I6K
7,463
Adds EXR format to store depth images in float32
{ "avatar_url": "https://avatars.githubusercontent.com/u/4803565?v=4", "events_url": "https://api.github.com/users/ducha-aiki/events{/privacy}", "followers_url": "https://api.github.com/users/ducha-aiki/followers", "following_url": "https://api.github.com/users/ducha-aiki/following{/other_user}", "gists_url": "https://api.github.com/users/ducha-aiki/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ducha-aiki", "id": 4803565, "login": "ducha-aiki", "node_id": "MDQ6VXNlcjQ4MDM1NjU=", "organizations_url": "https://api.github.com/users/ducha-aiki/orgs", "received_events_url": "https://api.github.com/users/ducha-aiki/received_events", "repos_url": "https://api.github.com/users/ducha-aiki/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ducha-aiki/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ducha-aiki/subscriptions", "type": "User", "url": "https://api.github.com/users/ducha-aiki", "user_view_type": "public" }
[]
open
false
null
[]
null
3
2025-03-17T17:42:40Z
2025-04-02T12:33:39Z
null
NONE
null
null
null
This PR adds the EXR feature to store depth images (or normals, etc.) in float32. It relies on [openexr_numpy](https://github.com/martinResearch/openexr_numpy/tree/main) to manipulate EXR images.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7463/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7463/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7463.diff", "html_url": "https://github.com/huggingface/datasets/pull/7463", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7463.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7463" }
https://api.github.com/repos/huggingface/datasets/issues/7462
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7462/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7462/comments
https://api.github.com/repos/huggingface/datasets/issues/7462/events
https://github.com/huggingface/datasets/pull/7462
2,925,612,945
PR_kwDODunzps6O9EA1
7,462
set dev version
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-17T16:00:53Z
2025-03-17T16:03:31Z
2025-03-17T16:01:08Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7462/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7462/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7462.diff", "html_url": "https://github.com/huggingface/datasets/pull/7462", "merged_at": "2025-03-17T16:01:08Z", "patch_url": "https://github.com/huggingface/datasets/pull/7462.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7462" }
https://api.github.com/repos/huggingface/datasets/issues/7461
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7461/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7461/comments
https://api.github.com/repos/huggingface/datasets/issues/7461/events
https://github.com/huggingface/datasets/issues/7461
2,925,608,123
I_kwDODunzps6uYTy7
7,461
List of images behave differently on IterableDataset and Dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/1288009?v=4", "events_url": "https://api.github.com/users/FredrikNoren/events{/privacy}", "followers_url": "https://api.github.com/users/FredrikNoren/followers", "following_url": "https://api.github.com/users/FredrikNoren/following{/other_user}", "gists_url": "https://api.github.com/users/FredrikNoren/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/FredrikNoren", "id": 1288009, "login": "FredrikNoren", "node_id": "MDQ6VXNlcjEyODgwMDk=", "organizations_url": "https://api.github.com/users/FredrikNoren/orgs", "received_events_url": "https://api.github.com/users/FredrikNoren/received_events", "repos_url": "https://api.github.com/users/FredrikNoren/repos", "site_admin": false, "starred_url": "https://api.github.com/users/FredrikNoren/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/FredrikNoren/subscriptions", "type": "User", "url": "https://api.github.com/users/FredrikNoren", "user_view_type": "public" }
[]
closed
false
null
[]
null
2
2025-03-17T15:59:23Z
2025-03-18T08:57:17Z
2025-03-18T08:57:16Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug

This code:

```python
def train_iterable_gen():
    images = np.array(load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg").resize((128, 128)))
    yield {
        "images": np.expand_dims(images, axis=0),
        "messages": [
            {"role": "user", "content": [{"type": "image", "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}]},
            {"role": "assistant", "content": [{"type": "text", "text": "duck"}]}
        ]
    }

train_ds = Dataset.from_generator(train_iterable_gen, features=Features({
    'images': [datasets.Image(mode=None, decode=True, id=None)],
    'messages': [{'content': [{'text': datasets.Value(dtype='string', id=None), 'type': datasets.Value(dtype='string', id=None)}], 'role': datasets.Value(dtype='string', id=None)}]
}))
```

works as I'd expect; if I iterate the dataset then the `images` column returns a `List[PIL.Image.Image]`, i.e. `'images': [<PIL.PngImagePlugin.PngImageFile image mode=RGB size=128x128 at 0x77EFB7EF4680>]`. But if I change `Dataset` to `IterableDataset`, the `images` column changes into `'images': [{'path': None, 'bytes': ..]`.

### Steps to reproduce the bug

The code above, plus:

```python
def load_image(url):
    response = requests.get(url)
    image = Image.open(io.BytesIO(response.content))
    return image
```

I'm feeding it to SFTTrainer.

### Expected behavior

Dataset and IterableDataset would behave the same.

### Environment info

```yaml
requires-python = ">=3.12"
dependencies = [
    "av>=14.1.0", "boto3>=1.36.7", "datasets>=3.3.2", "docker>=7.1.0",
    "google-cloud-storage>=2.19.0", "grpcio>=1.70.0", "grpcio-tools>=1.70.0",
    "moviepy>=2.1.2", "open-clip-torch>=2.31.0",
    "opencv-python>=4.11.0.86; sys_platform == 'darwin'",
    "opencv-python-headless>=4.11.0.86; sys_platform == 'linux'",
    "pandas>=2.2.3", "pillow>=10.4.0", "plotly>=6.0.0", "py-spy>=0.4.0",
    "pydantic>=2.10.6", "pydantic-settings>=2.7.1", "pymysql>=1.1.1",
    "ray[data,default,serve,train,tune]>=2.43.0", "torch>=2.6.0",
    "torchmetrics>=1.6.1", "torchvision>=0.21.0",
    "transformers[torch]@git+https://github.com/huggingface/transformers",
    "wandb>=0.19.4",
    # https://github.com/Dao-AILab/flash-attention/issues/833
    "flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.3/flash_attn-2.7.3+cu12torch2.6cxx11abiFALSE-cp312-cp312-linux_x86_64.whl; sys_platform == 'linux'",
    "trl@https://github.com/huggingface/trl.git",
    "peft>=0.14.0",
]
```
{ "avatar_url": "https://avatars.githubusercontent.com/u/1288009?v=4", "events_url": "https://api.github.com/users/FredrikNoren/events{/privacy}", "followers_url": "https://api.github.com/users/FredrikNoren/followers", "following_url": "https://api.github.com/users/FredrikNoren/following{/other_user}", "gists_url": "https://api.github.com/users/FredrikNoren/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/FredrikNoren", "id": 1288009, "login": "FredrikNoren", "node_id": "MDQ6VXNlcjEyODgwMDk=", "organizations_url": "https://api.github.com/users/FredrikNoren/orgs", "received_events_url": "https://api.github.com/users/FredrikNoren/received_events", "repos_url": "https://api.github.com/users/FredrikNoren/repos", "site_admin": false, "starred_url": "https://api.github.com/users/FredrikNoren/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/FredrikNoren/subscriptions", "type": "User", "url": "https://api.github.com/users/FredrikNoren", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7461/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7461/timeline
null
completed
null
null
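As a hedged workaround sketch for the behaviour reported above (streaming examples yielding `{'path': ..., 'bytes': ...}` dicts instead of decoded images), one could decode the bytes manually with PIL inside a `map`; the helper below is hypothetical and not part of the original report.

```python
import io
from PIL import Image

def to_pil(example):
    # Decode raw image dicts into PIL images; leave already-decoded entries untouched.
    example["images"] = [
        img if isinstance(img, Image.Image) else Image.open(io.BytesIO(img["bytes"]))
        for img in example["images"]
    ]
    return example

# train_ds is assumed to be the IterableDataset built as in the report above.
# train_ds = train_ds.map(to_pil)
```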
https://api.github.com/repos/huggingface/datasets/issues/7460
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7460/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7460/comments
https://api.github.com/repos/huggingface/datasets/issues/7460/events
https://github.com/huggingface/datasets/pull/7460
2,925,605,865
PR_kwDODunzps6O9Ccc
7,460
release: 3.4.1
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-17T15:58:31Z
2025-03-17T16:01:14Z
2025-03-17T15:59:19Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7460/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7460/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7460.diff", "html_url": "https://github.com/huggingface/datasets/pull/7460", "merged_at": "2025-03-17T15:59:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/7460.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7460" }
https://api.github.com/repos/huggingface/datasets/issues/7459
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7459/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7459/comments
https://api.github.com/repos/huggingface/datasets/issues/7459/events
https://github.com/huggingface/datasets/pull/7459
2,925,491,766
PR_kwDODunzps6O8pWp
7,459
Fix data_files filtering
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-17T15:20:21Z
2025-03-17T15:25:56Z
2025-03-17T15:25:54Z
MEMBER
null
null
null
close https://github.com/huggingface/datasets/issues/7458
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7459/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7459/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7459.diff", "html_url": "https://github.com/huggingface/datasets/pull/7459", "merged_at": "2025-03-17T15:25:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/7459.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7459" }
https://api.github.com/repos/huggingface/datasets/issues/7458
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7458/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7458/comments
https://api.github.com/repos/huggingface/datasets/issues/7458/events
https://github.com/huggingface/datasets/issues/7458
2,925,403,528
I_kwDODunzps6uXh2I
7,458
Loading the `laion/filtered-wit` dataset in streaming mode fails on v3.4.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/23343961?v=4", "events_url": "https://api.github.com/users/nikita-savelyevv/events{/privacy}", "followers_url": "https://api.github.com/users/nikita-savelyevv/followers", "following_url": "https://api.github.com/users/nikita-savelyevv/following{/other_user}", "gists_url": "https://api.github.com/users/nikita-savelyevv/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nikita-savelyevv", "id": 23343961, "login": "nikita-savelyevv", "node_id": "MDQ6VXNlcjIzMzQzOTYx", "organizations_url": "https://api.github.com/users/nikita-savelyevv/orgs", "received_events_url": "https://api.github.com/users/nikita-savelyevv/received_events", "repos_url": "https://api.github.com/users/nikita-savelyevv/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nikita-savelyevv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nikita-savelyevv/subscriptions", "type": "User", "url": "https://api.github.com/users/nikita-savelyevv", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
null
1
2025-03-17T14:54:02Z
2025-03-17T16:02:04Z
2025-03-17T15:25:55Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug

Loading https://huggingface.co/datasets/laion/filtered-wit in streaming mode fails after the update to `datasets==3.4.0`. The dataset loads fine on v3.3.2.

### Steps to reproduce the bug

Steps to reproduce:

```
pip install datasets==3.4.0
python -c "from datasets import load_dataset; load_dataset('laion/filtered-wit', split='train', streaming=True)"
```

Results in:

```
$ python -c "from datasets import load_dataset; load_dataset('laion/filtered-wit', split='train', streaming=True)"
Repo card metadata block was not found. Setting CardData to empty.
Resolving data files: 100%|█████████████████████████████████████████████| 560/560 [00:00<00:00, 2280.24it/s]
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/load.py", line 2080, in load_dataset
    return builder_instance.as_streaming_dataset(split=split)
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/builder.py", line 1265, in as_streaming_dataset
    splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/packaged_modules/parquet/parquet.py", line 49, in _split_generators
    data_files = dl_manager.download_and_extract(self.config.data_files)
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py", line 169, in download_and_extract
    return self.extract(self.download(url_or_urls))
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py", line 121, in extract
    urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/utils/py_utils.py", line 496, in map_nested
    mapped = [
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/utils/py_utils.py", line 497, in <listcomp>
    map_nested(
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/utils/py_utils.py", line 513, in map_nested
    mapped = [
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/utils/py_utils.py", line 514, in <listcomp>
    _single_map_nested((function, obj, batched, batch_size, types, None, True, None))
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/utils/py_utils.py", line 375, in _single_map_nested
    return function(data_struct)
  File "/home/nsavel/venvs/tmp/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py", line 131, in _extract
    raise NotImplementedError(
NotImplementedError: Extraction protocol for TAR archives like 'hf://datasets/laion/filtered-wit@c38ca7464e9934d9a49f88b3f60f5ad63b245465/data/00000.tar' is not implemented in streaming mode. Please use `dl_manager.iter_archive` instead.

Example usage:

	url = dl_manager.download(url)
	tar_archive_iterator = dl_manager.iter_archive(url)

	for filename, file in tar_archive_iterator:
		...
```

### Expected behavior

Dataset loads successfully.

### Environment info

Ubuntu 20.04.6. Python 3.9. Datasets 3.4.0.

pip freeze:

```
aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiosignal==1.3.2
async-timeout==5.0.1
attrs==25.3.0
certifi==2025.1.31
charset-normalizer==3.4.1
datasets==3.4.0
dill==0.3.8
filelock==3.18.0
frozenlist==1.5.0
fsspec==2024.12.0
huggingface-hub==0.29.3
idna==3.10
multidict==6.1.0
multiprocess==0.70.16
numpy==2.0.2
packaging==24.2
pandas==2.2.3
propcache==0.3.0
pyarrow==19.0.1
python-dateutil==2.9.0.post0
pytz==2025.1
PyYAML==6.0.2
requests==2.32.3
six==1.17.0
tqdm==4.67.1
typing_extensions==4.12.2
tzdata==2025.1
urllib3==2.3.0
xxhash==3.5.0
yarl==1.18.3
```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7458/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7458/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7457
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7457/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7457/comments
https://api.github.com/repos/huggingface/datasets/issues/7457/events
https://github.com/huggingface/datasets/issues/7457
2,924,886,467
I_kwDODunzps6uVjnD
7,457
Document the HF_DATASETS_CACHE env variable
{ "avatar_url": "https://avatars.githubusercontent.com/u/92166725?v=4", "events_url": "https://api.github.com/users/LSerranoPEReN/events{/privacy}", "followers_url": "https://api.github.com/users/LSerranoPEReN/followers", "following_url": "https://api.github.com/users/LSerranoPEReN/following{/other_user}", "gists_url": "https://api.github.com/users/LSerranoPEReN/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/LSerranoPEReN", "id": 92166725, "login": "LSerranoPEReN", "node_id": "U_kgDOBX5aRQ", "organizations_url": "https://api.github.com/users/LSerranoPEReN/orgs", "received_events_url": "https://api.github.com/users/LSerranoPEReN/received_events", "repos_url": "https://api.github.com/users/LSerranoPEReN/repos", "site_admin": false, "starred_url": "https://api.github.com/users/LSerranoPEReN/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LSerranoPEReN/subscriptions", "type": "User", "url": "https://api.github.com/users/LSerranoPEReN", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/129883215?v=4", "events_url": "https://api.github.com/users/Harry-Yang0518/events{/privacy}", "followers_url": "https://api.github.com/users/Harry-Yang0518/followers", "following_url": "https://api.github.com/users/Harry-Yang0518/following{/other_user}", "gists_url": "https://api.github.com/users/Harry-Yang0518/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Harry-Yang0518", "id": 129883215, "login": "Harry-Yang0518", "node_id": "U_kgDOB73cTw", "organizations_url": "https://api.github.com/users/Harry-Yang0518/orgs", "received_events_url": "https://api.github.com/users/Harry-Yang0518/received_events", "repos_url": "https://api.github.com/users/Harry-Yang0518/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Harry-Yang0518/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Harry-Yang0518/subscriptions", "type": "User", "url": "https://api.github.com/users/Harry-Yang0518", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/129883215?v=4", "events_url": "https://api.github.com/users/Harry-Yang0518/events{/privacy}", "followers_url": "https://api.github.com/users/Harry-Yang0518/followers", "following_url": "https://api.github.com/users/Harry-Yang0518/following{/other_user}", "gists_url": "https://api.github.com/users/Harry-Yang0518/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Harry-Yang0518", "id": 129883215, "login": "Harry-Yang0518", "node_id": "U_kgDOB73cTw", "organizations_url": "https://api.github.com/users/Harry-Yang0518/orgs", "received_events_url": "https://api.github.com/users/Harry-Yang0518/received_events", "repos_url": "https://api.github.com/users/Harry-Yang0518/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Harry-Yang0518/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Harry-Yang0518/subscriptions", "type": "User", "url": "https://api.github.com/users/Harry-Yang0518", "user_view_type": "public" } ]
null
4
2025-03-17T12:24:50Z
2025-05-06T15:54:39Z
2025-05-06T15:54:39Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request

Hello, I have a use case where my team shares models and datasets in a shared directory to avoid duplication. I noticed that the [cache documentation for datasets](https://huggingface.co/docs/datasets/main/en/cache) only mentions the `HF_HOME` environment variable but never `HF_DATASETS_CACHE`. It would be nice to add `HF_DATASETS_CACHE` to the datasets documentation if it's an intended feature. If it's not, a deprecation warning would be appreciated.

### Motivation

This variable is fully working and similar to what `HF_HUB_CACHE` does for models, so it's nice to know that it exists. This seems to be a quick change to implement.

### Your contribution

I could contribute, since this only affects a small portion of the documentation.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7457/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7457/timeline
null
completed
null
null
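A minimal sketch of the use case described in the request above: pointing the datasets cache at a shared directory via `HF_DATASETS_CACHE`. The path and dataset name are hypothetical; the variable has to be set before `datasets` is imported for it to be picked up.

```python
import os

# Hypothetical shared location; set before importing datasets so its config picks it up.
os.environ["HF_DATASETS_CACHE"] = "/shared/cache/hf_datasets"

from datasets import load_dataset

ds = load_dataset("imdb", split="train")      # Arrow cache files land under the shared directory
print(ds.cache_files[0]["filename"])          # should point inside /shared/cache/hf_datasets
```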
https://api.github.com/repos/huggingface/datasets/issues/7456
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7456/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7456/comments
https://api.github.com/repos/huggingface/datasets/issues/7456/events
https://github.com/huggingface/datasets/issues/7456
2,922,676,278
I_kwDODunzps6uNIA2
7,456
.add_faiss_index and .add_elasticsearch_index return ImportError on Google Colab
{ "avatar_url": "https://avatars.githubusercontent.com/u/109490785?v=4", "events_url": "https://api.github.com/users/MapleBloom/events{/privacy}", "followers_url": "https://api.github.com/users/MapleBloom/followers", "following_url": "https://api.github.com/users/MapleBloom/following{/other_user}", "gists_url": "https://api.github.com/users/MapleBloom/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/MapleBloom", "id": 109490785, "login": "MapleBloom", "node_id": "U_kgDOBoayYQ", "organizations_url": "https://api.github.com/users/MapleBloom/orgs", "received_events_url": "https://api.github.com/users/MapleBloom/received_events", "repos_url": "https://api.github.com/users/MapleBloom/repos", "site_admin": false, "starred_url": "https://api.github.com/users/MapleBloom/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MapleBloom/subscriptions", "type": "User", "url": "https://api.github.com/users/MapleBloom", "user_view_type": "public" }
[]
open
false
null
[]
null
6
2025-03-16T00:51:49Z
2025-03-17T15:57:19Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug

In Google Colab, `!pip install faiss-cpu` works and `import faiss` raises no error, but `embeddings_dataset.add_faiss_index(column='embeddings')` returns:

```
/usr/local/lib/python3.11/dist-packages/datasets/search.py in __init__(self, device, string_factory, metric_type, custom_index)
    247         self.faiss_index = custom_index
    248         if not _has_faiss:
--> 249             raise ImportError(
    250                 "You must install Faiss to use FaissIndex. To do so you can run conda install -c pytorch faiss-cpu or conda install -c pytorch faiss-gpu. "
    251                 "A community supported package is also available on pypi: pip install faiss-cpu or pip install faiss-gpu. "
```

because `_has_faiss = importlib.util.find_spec("faiss") is not None` at the beginning of `datasets/search.py` returns `False`, while the same code in the Colab notebook returns:

```
ModuleSpec(name='faiss', loader=<_frozen_importlib_external.SourceFileLoader object at 0x7b7851449f50>, origin='/usr/local/lib/python3.11/dist-packages/faiss/__init__.py', submodule_search_locations=['/usr/local/lib/python3.11/dist-packages/faiss'])
```

But

```
import datasets
datasets.search._has_faiss
```

in the Colab notebook also returns `False`. The same story with `_has_elasticsearch`.

### Steps to reproduce the bug

1. Follow https://huggingface.co/learn/nlp-course/chapter5/6?fw=pt on Google Colab
2. up to `embeddings_dataset.add_faiss_index(column='embeddings')`
3. `embeddings_dataset.add_elasticsearch_index(column='embeddings')`
4. https://colab.research.google.com/drive/1h2cjuiClblqzbNQgrcoLYOC8zBqTLLcv#scrollTo=3ddzRp72auOF

### Expected behavior

I've only started the tutorial and don't know exactly, but `embeddings_dataset.add_faiss_index(column='embeddings')` should work without an `ImportError`.

### Environment info

Google Colab notebook with default config
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7456/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7456/timeline
null
null
null
null
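A hedged diagnostic sketch related to the report above: since `_has_faiss` is computed once when `datasets/search.py` is first imported, installing faiss-cpu (or restarting the Colab runtime) before the first `import datasets` is one way to check whether the cached flag is the culprit. This is an illustration, not the confirmed resolution of the issue.

```python
# Run in a fresh runtime where faiss-cpu was installed *before* importing datasets.
import importlib.util

assert importlib.util.find_spec("faiss") is not None  # faiss is visible to this interpreter

import datasets
import datasets.search

# _has_faiss is evaluated at import time of datasets.search (see the report above),
# so it should be True in a runtime where faiss was installed first.
print(datasets.search._has_faiss)
```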
https://api.github.com/repos/huggingface/datasets/issues/7455
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7455/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7455/comments
https://api.github.com/repos/huggingface/datasets/issues/7455/events
https://github.com/huggingface/datasets/issues/7455
2,921,933,250
I_kwDODunzps6uKSnC
7,455
Problems with local dataset after upgrade from 3.3.2 to 3.4.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/60151338?v=4", "events_url": "https://api.github.com/users/andjoer/events{/privacy}", "followers_url": "https://api.github.com/users/andjoer/followers", "following_url": "https://api.github.com/users/andjoer/following{/other_user}", "gists_url": "https://api.github.com/users/andjoer/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/andjoer", "id": 60151338, "login": "andjoer", "node_id": "MDQ6VXNlcjYwMTUxMzM4", "organizations_url": "https://api.github.com/users/andjoer/orgs", "received_events_url": "https://api.github.com/users/andjoer/received_events", "repos_url": "https://api.github.com/users/andjoer/repos", "site_admin": false, "starred_url": "https://api.github.com/users/andjoer/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/andjoer/subscriptions", "type": "User", "url": "https://api.github.com/users/andjoer", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-03-15T09:22:50Z
2025-03-17T16:20:43Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug

After yesterday's upgrade from datasets 3.3.2 to 3.4.0, I was no longer able to open a locally saved dataset that was created with an older datasets version. The traceback is:

```
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/datasets/packaged_modules/arrow/arrow.py", line 67, in _generate_tables
    batches = pa.ipc.open_stream(f)
  File "/usr/local/lib/python3.10/dist-packages/pyarrow/ipc.py", line 190, in open_stream
    return RecordBatchStreamReader(source, options=options,
  File "/usr/local/lib/python3.10/dist-packages/pyarrow/ipc.py", line 52, in __init__
    self._open(source, options=options, memory_pool=memory_pool)
  File "pyarrow/ipc.pxi", line 1006, in pyarrow.lib._RecordBatchStreamReader._open
  File "pyarrow/error.pxi", line 155, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 92, in pyarrow.lib.check_status
pyarrow.lib.ArrowInvalid: Expected to read 538970747 metadata bytes, but only read 2126

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 1855, in _prepare_split_single
    for _, table in generator:
  File "/usr/local/lib/python3.10/dist-packages/datasets/packaged_modules/arrow/arrow.py", line 69, in _generate_tables
    reader = pa.ipc.open_file(f)
  File "/usr/local/lib/python3.10/dist-packages/pyarrow/ipc.py", line 234, in open_file
    return RecordBatchFileReader(
  File "/usr/local/lib/python3.10/dist-packages/pyarrow/ipc.py", line 110, in __init__
    self._open(source, footer_offset=footer_offset,
  File "pyarrow/ipc.pxi", line 1090, in pyarrow.lib._RecordBatchFileReader._open
  File "pyarrow/error.pxi", line 155, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 92, in pyarrow.lib.check_status
pyarrow.lib.ArrowInvalid: Not an Arrow file
```

### Steps to reproduce the bug

Load a dataset from a local folder with

```
dataset = load_dataset(
    args.train_data_dir,
    cache_dir=args.cache_dir,
)
```

as is done, for example, in the training script for SD3 controlnet. This is a minimal script to test it:

```
from datasets import load_dataset

def main():
    dataset = load_dataset(
        "local_dataset",
    )
    print(dataset)
    print("Sample data:", dataset["train"][0])

if __name__ == "__main__":
    main()
```

### Expected behavior

Work in 3.4.0 like in 3.3.2.

### Environment info

- `datasets` version: 3.4.0
- Platform: Linux-5.15.0-75-generic-x86_64-with-glibc2.35
- Python version: 3.10.12
- `huggingface_hub` version: 0.29.3
- PyArrow version: 19.0.1
- Pandas version: 2.2.3
- `fsspec` version: 2024.12.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7455/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7455/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7454
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7454/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7454/comments
https://api.github.com/repos/huggingface/datasets/issues/7454/events
https://github.com/huggingface/datasets/pull/7454
2,920,760,793
PR_kwDODunzps6Os6bx
7,454
set dev version
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-14T16:48:19Z
2025-03-14T16:50:31Z
2025-03-14T16:48:28Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7454/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7454/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7454.diff", "html_url": "https://github.com/huggingface/datasets/pull/7454", "merged_at": "2025-03-14T16:48:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/7454.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7454" }
https://api.github.com/repos/huggingface/datasets/issues/7453
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7453/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7453/comments
https://api.github.com/repos/huggingface/datasets/issues/7453/events
https://github.com/huggingface/datasets/pull/7453
2,920,719,503
PR_kwDODunzps6OsxR1
7,453
release: 3.4.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-14T16:30:45Z
2025-03-14T16:38:10Z
2025-03-14T16:38:08Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7453/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7453/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7453.diff", "html_url": "https://github.com/huggingface/datasets/pull/7453", "merged_at": "2025-03-14T16:38:08Z", "patch_url": "https://github.com/huggingface/datasets/pull/7453.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7453" }
https://api.github.com/repos/huggingface/datasets/issues/7452
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7452/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7452/comments
https://api.github.com/repos/huggingface/datasets/issues/7452/events
https://github.com/huggingface/datasets/pull/7452
2,920,354,783
PR_kwDODunzps6Orhw4
7,452
minor docs changes
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-14T14:14:04Z
2025-03-14T14:16:38Z
2025-03-14T14:14:20Z
MEMBER
null
null
null
before the release
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7452/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7452/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7452.diff", "html_url": "https://github.com/huggingface/datasets/pull/7452", "merged_at": "2025-03-14T14:14:20Z", "patch_url": "https://github.com/huggingface/datasets/pull/7452.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7452" }
https://api.github.com/repos/huggingface/datasets/issues/7451
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7451/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7451/comments
https://api.github.com/repos/huggingface/datasets/issues/7451/events
https://github.com/huggingface/datasets/pull/7451
2,919,835,663
PR_kwDODunzps6OpwDz
7,451
Fix resuming after `ds.set_epoch(new_epoch)`
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-14T10:31:25Z
2025-03-14T10:50:11Z
2025-03-14T10:50:09Z
MEMBER
null
null
null
close https://github.com/huggingface/datasets/issues/7447
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7451/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7451/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7451.diff", "html_url": "https://github.com/huggingface/datasets/pull/7451", "merged_at": "2025-03-14T10:50:09Z", "patch_url": "https://github.com/huggingface/datasets/pull/7451.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7451" }
https://api.github.com/repos/huggingface/datasets/issues/7450
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7450/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7450/comments
https://api.github.com/repos/huggingface/datasets/issues/7450/events
https://github.com/huggingface/datasets/pull/7450
2,916,681,414
PR_kwDODunzps6OfMKs
7,450
Add IterableDataset.decode with multithreading
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-13T10:41:35Z
2025-03-14T10:35:37Z
2025-03-14T10:35:35Z
MEMBER
null
null
null
Useful for dataset streaming for multimodal datasets, and especially for lerobot. It speeds up streaming up to 20 times.

When decoding is enabled (default), media types are decoded:

* audio -> dict of "array", "sampling_rate" and "path"
* image -> PIL.Image
* video -> torchvision.io.VideoReader

You can enable multithreading using `num_threads`. This is especially useful to speed up remote data streaming. However, it can be slower than `num_threads=0` for local data on fast disks.

PS: Disabling decoding is useful if you want to iterate on the paths or bytes of the media files without actually decoding their content.

Example: speed up streaming with multithreading:

```py
>>> import os
>>> from datasets import load_dataset
>>> from tqdm import tqdm
>>> ds = load_dataset("sshh12/planet-textures", split="train", streaming=True)
>>> num_threads = min(32, (os.cpu_count() or 1) + 4)
>>> ds = ds.decode(num_threads=num_threads)
>>> for _ in tqdm(ds):  # 20 times faster !
...     ...
```

Why not multiprocessing? Decoding is done with the GIL released in soundfile/PIL/torchvision, so multiprocessing would just use more memory.

TODO:
- [x] test
- [x] add to docs
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7450/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7450/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7450.diff", "html_url": "https://github.com/huggingface/datasets/pull/7450", "merged_at": "2025-03-14T10:35:35Z", "patch_url": "https://github.com/huggingface/datasets/pull/7450.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7450" }
https://api.github.com/repos/huggingface/datasets/issues/7449
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7449/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7449/comments
https://api.github.com/repos/huggingface/datasets/issues/7449/events
https://github.com/huggingface/datasets/issues/7449
2,916,235,092
I_kwDODunzps6t0jdU
7,449
Cannot load data with different schemas from different parquet files
{ "avatar_url": "https://avatars.githubusercontent.com/u/39846316?v=4", "events_url": "https://api.github.com/users/li-plus/events{/privacy}", "followers_url": "https://api.github.com/users/li-plus/followers", "following_url": "https://api.github.com/users/li-plus/following{/other_user}", "gists_url": "https://api.github.com/users/li-plus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/li-plus", "id": 39846316, "login": "li-plus", "node_id": "MDQ6VXNlcjM5ODQ2MzE2", "organizations_url": "https://api.github.com/users/li-plus/orgs", "received_events_url": "https://api.github.com/users/li-plus/received_events", "repos_url": "https://api.github.com/users/li-plus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/li-plus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/li-plus/subscriptions", "type": "User", "url": "https://api.github.com/users/li-plus", "user_view_type": "public" }
[]
closed
false
null
[]
null
2
2025-03-13T08:14:49Z
2025-03-17T07:27:48Z
2025-03-17T07:27:46Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Cannot load samples with optional fields from different files. The schema cannot be correctly derived. ### Steps to reproduce the bug When I place two samples with an optional field `some_extra_field` within a single parquet file, it can be loaded via `load_dataset`. ```python import pandas as pd from datasets import load_dataset data = [ {'conversations': {'role': 'user', 'content': 'hello'}}, {'conversations': {'role': 'user', 'content': 'hi', 'some_extra_field': 'some_value'}} ] df = pd.DataFrame(data) df.to_parquet('data.parquet') dataset = load_dataset('parquet', data_files='data.parquet', split='train') print(dataset.features) ``` The schema can be derived. `some_extra_field` is set to None for the first row where it is absent. ``` {'conversations': {'content': Value(dtype='string', id=None), 'role': Value(dtype='string', id=None), 'some_extra_field': Value(dtype='string', id=None)}} ``` However, when I separate the samples into different files, it cannot be loaded. ```python import pandas as pd from datasets import load_dataset data1 = [{'conversations': {'role': 'user', 'content': 'hello'}}] pd.DataFrame(data1).to_parquet('data1.parquet') data2 = [{'conversations': {'role': 'user', 'content': 'hi', 'some_extra_field': 'some_value'}}] pd.DataFrame(data2).to_parquet('data2.parquet') dataset = load_dataset('parquet', data_files=['data1.parquet', 'data2.parquet'], split='train') print(dataset.features) ``` Traceback: ``` Traceback (most recent call last): File "/home/tiger/.local/lib/python3.9/site-packages/datasets/builder.py", line 1854, in _prepare_split_single for _, table in generator: File "/home/tiger/.local/lib/python3.9/site-packages/datasets/packaged_modules/parquet/parquet.py", line 106, in _generate_tables yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table) File "/home/tiger/.local/lib/python3.9/site-packages/datasets/packaged_modules/parquet/parquet.py", line 73, in _cast_table pa_table = table_cast(pa_table, self.info.features.arrow_schema) File "/home/tiger/.local/lib/python3.9/site-packages/datasets/table.py", line 2292, in table_cast return cast_table_to_schema(table, schema) File "/home/tiger/.local/lib/python3.9/site-packages/datasets/table.py", line 2245, in cast_table_to_schema arrays = [ File "/home/tiger/.local/lib/python3.9/site-packages/datasets/table.py", line 2246, in <listcomp> cast_array_to_feature( File "/home/tiger/.local/lib/python3.9/site-packages/datasets/table.py", line 1795, in wrapper return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) File "/home/tiger/.local/lib/python3.9/site-packages/datasets/table.py", line 1795, in <listcomp> return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) File "/home/tiger/.local/lib/python3.9/site-packages/datasets/table.py", line 2108, in cast_array_to_feature raise TypeError(f"Couldn't cast array of type\n{_short_str(array.type)}\nto\n{_short_str(feature)}") TypeError: Couldn't cast array of type struct<content: string, role: string, some_extra_field: string> to {'content': Value(dtype='string', id=None), 'role': Value(dtype='string', id=None)} ``` ### Expected behavior Correctly load data with optional fields from different parquet files. ### Environment info - `datasets` version: 3.3.2 - Platform: Linux-5.10.135.bsk.4-amd64-x86_64-with-glibc2.31 - Python version: 3.9.2 - `huggingface_hub` version: 0.28.1 - PyArrow version: 17.0.0 - Pandas version: 2.2.2 - `fsspec` version: 2024.3.1
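A possible workaround while the bug stands, sketched under the assumption that explicitly declaring the full schema makes `datasets` cast both files to it instead of inferring the schema from the first file (how missing struct fields are filled may depend on the `datasets` version): pass `features` to `load_dataset`.

```python
import pandas as pd
from datasets import load_dataset, Features, Value

data1 = [{'conversations': {'role': 'user', 'content': 'hello'}}]
pd.DataFrame(data1).to_parquet('data1.parquet')
data2 = [{'conversations': {'role': 'user', 'content': 'hi', 'some_extra_field': 'some_value'}}]
pd.DataFrame(data2).to_parquet('data2.parquet')

# Declare the superset schema up front so the optional field is part of it.
features = Features({
    'conversations': {
        'content': Value('string'),
        'role': Value('string'),
        'some_extra_field': Value('string'),
    }
})
dataset = load_dataset(
    'parquet',
    data_files=['data1.parquet', 'data2.parquet'],
    features=features,
    split='train',
)
print(dataset.features)
```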
{ "avatar_url": "https://avatars.githubusercontent.com/u/39846316?v=4", "events_url": "https://api.github.com/users/li-plus/events{/privacy}", "followers_url": "https://api.github.com/users/li-plus/followers", "following_url": "https://api.github.com/users/li-plus/following{/other_user}", "gists_url": "https://api.github.com/users/li-plus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/li-plus", "id": 39846316, "login": "li-plus", "node_id": "MDQ6VXNlcjM5ODQ2MzE2", "organizations_url": "https://api.github.com/users/li-plus/orgs", "received_events_url": "https://api.github.com/users/li-plus/received_events", "repos_url": "https://api.github.com/users/li-plus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/li-plus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/li-plus/subscriptions", "type": "User", "url": "https://api.github.com/users/li-plus", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7449/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7449/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7448
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7448/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7448/comments
https://api.github.com/repos/huggingface/datasets/issues/7448/events
https://github.com/huggingface/datasets/issues/7448
2,916,025,762
I_kwDODunzps6tzwWi
7,448
`datasets.disable_caching` doesn't work
{ "avatar_url": "https://avatars.githubusercontent.com/u/35629974?v=4", "events_url": "https://api.github.com/users/UCC-team/events{/privacy}", "followers_url": "https://api.github.com/users/UCC-team/followers", "following_url": "https://api.github.com/users/UCC-team/following{/other_user}", "gists_url": "https://api.github.com/users/UCC-team/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/UCC-team", "id": 35629974, "login": "UCC-team", "node_id": "MDQ6VXNlcjM1NjI5OTc0", "organizations_url": "https://api.github.com/users/UCC-team/orgs", "received_events_url": "https://api.github.com/users/UCC-team/received_events", "repos_url": "https://api.github.com/users/UCC-team/repos", "site_admin": false, "starred_url": "https://api.github.com/users/UCC-team/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/UCC-team/subscriptions", "type": "User", "url": "https://api.github.com/users/UCC-team", "user_view_type": "public" }
[]
open
false
null
[]
null
2
2025-03-13T06:40:12Z
2025-03-22T04:37:07Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
When I use `Dataset.from_generator(my_gen)` to load my dataset, it silently ignores my changes to the generator function and returns the previously cached data. I tried `datasets.disable_caching()`, but it doesn't work!
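For context, `Dataset.from_generator` reuses a cached Arrow file whose fingerprint comes from hashing the generator function, so a generator that cannot be hashed reliably can silently return stale data. A minimal sketch of one workaround, assuming this `datasets` version supports `keep_in_memory` on `from_generator` (it builds the dataset in memory and skips the on-disk cache):

```python
import datasets

def my_gen():
    # updated generator logic
    for i in range(10):
        yield {"x": i * 2}

# Assumption: keep_in_memory=True avoids reusing a previously cached Arrow file.
ds = datasets.Dataset.from_generator(my_gen, keep_in_memory=True)
print(ds[0])
```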
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7448/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7448/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7447
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7447/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7447/comments
https://api.github.com/repos/huggingface/datasets/issues/7447/events
https://github.com/huggingface/datasets/issues/7447
2,915,233,248
I_kwDODunzps6twu3g
7,447
Epochs shortened after resuming mid-epoch with Iterable dataset+StatefulDataloader(persistent_workers=True)
{ "avatar_url": "https://avatars.githubusercontent.com/u/4356534?v=4", "events_url": "https://api.github.com/users/dhruvdcoder/events{/privacy}", "followers_url": "https://api.github.com/users/dhruvdcoder/followers", "following_url": "https://api.github.com/users/dhruvdcoder/following{/other_user}", "gists_url": "https://api.github.com/users/dhruvdcoder/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dhruvdcoder", "id": 4356534, "login": "dhruvdcoder", "node_id": "MDQ6VXNlcjQzNTY1MzQ=", "organizations_url": "https://api.github.com/users/dhruvdcoder/orgs", "received_events_url": "https://api.github.com/users/dhruvdcoder/received_events", "repos_url": "https://api.github.com/users/dhruvdcoder/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dhruvdcoder/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhruvdcoder/subscriptions", "type": "User", "url": "https://api.github.com/users/dhruvdcoder", "user_view_type": "public" }
[]
closed
false
null
[]
null
5
2025-03-12T21:41:05Z
2025-03-14T17:26:59Z
2025-03-14T10:50:10Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug When `torchdata.stateful_dataloader.StatefulDataloader(persistent_workers=True)` the epochs after resuming only iterate through the examples that were left in the epoch when the training was interrupted. For example, in the script below training is interrupted on step 124 (epoch 1) when 3 batches are left. Then after resuming, the rest of epochs (2 and 3) only iterate through these 3 batches. ### Steps to reproduce the bug Run the following script with and with PERSISTENT_WORKERS=true. ```python # !/usr/bin/env python3 # torch==2.5.1 # datasets==3.3.2 # torchdata>=0.9.0 import datasets import pprint from torchdata.stateful_dataloader import StatefulDataLoader import os PERSISTENT_WORKERS = ( os.environ.get("PERSISTENT_WORKERS", "False").lower() == "true" ) # PERSISTENT_WORKERS = True # Incorrect resume # ds = datasets.load_from_disk("dataset").to_iterable_dataset(num_shards=4) def generator(): for i in range(128): yield {"x": i} ds = datasets.Dataset.from_generator( generator, features=datasets.Features({"x": datasets.Value("int32")}) ).to_iterable_dataset(num_shards=4) dl = StatefulDataLoader( ds, batch_size=2, num_workers=2, persistent_workers=PERSISTENT_WORKERS ) global_step = 0 epoch = 0 ds_state_dict = None state_dict = None resumed = False while True: if epoch >= 3: break if state_dict is not None: dl.load_state_dict(state_dict) state_dict = None ds_state_dict = None resumed = True print("resumed") for i, batch in enumerate(dl): print(f"epoch: {epoch}, global_step: {global_step}, batch: {batch}") global_step += 1 # consume datapoint # simulate error if global_step == 124 and not resumed: ds_state_dict = ds.state_dict() state_dict = dl.state_dict() print("checkpoint") print("ds_state_dict") pprint.pprint(ds_state_dict) print("dl_state_dict") pprint.pprint(state_dict) break if state_dict is None: ds.set_epoch(epoch) epoch += 1 ``` The script checkpoints when there are three batches left in the second epoch. After resuming, only the last three batches are repeated in the rest of the epochs. If it helps, following are the two state_dicts for the dataloader save at the same step with the two settings. The left one is for `PERSISTENT_WORKERS=False` ![Image](https://github.com/user-attachments/assets/c97d6502-d7bd-4ef4-ae2d-66fe1a9732b1) ### Expected behavior All the elements in the dataset should be iterated through in the epochs following the one where we resumed. The expected behavior can be seen by setting `PERSISTENT_WORKERS=False`. ### Environment info torch==2.5.1 datasets==3.3.2 torchdata>=0.9.0
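As the report itself observes, the shortened epochs only appear with `persistent_workers=True`. A minimal sketch of that mitigation is shown below (illustrative only, not a fix for the underlying bug; in a real run the state would be captured mid-epoch):

```python
import datasets
from torchdata.stateful_dataloader import StatefulDataLoader

ds = (
    datasets.Dataset.from_dict({"x": list(range(128))})
    .to_iterable_dataset(num_shards=4)
)
# Mitigation from the report: with persistent_workers=False, epochs after
# resuming iterate over the full dataset again.
dl = StatefulDataLoader(ds, batch_size=2, num_workers=2, persistent_workers=False)
state = dl.state_dict()    # captured mid-epoch in a real training run
dl.load_state_dict(state)  # resume from the captured state
```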
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7447/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7447/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7446
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7446/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7446/comments
https://api.github.com/repos/huggingface/datasets/issues/7446/events
https://github.com/huggingface/datasets/issues/7446
2,913,050,552
I_kwDODunzps6toZ-4
7,446
pyarrow.lib.ArrowTypeError: Expected dict key of type str or bytes, got 'int'
{ "avatar_url": "https://avatars.githubusercontent.com/u/88258534?v=4", "events_url": "https://api.github.com/users/rangehow/events{/privacy}", "followers_url": "https://api.github.com/users/rangehow/followers", "following_url": "https://api.github.com/users/rangehow/following{/other_user}", "gists_url": "https://api.github.com/users/rangehow/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rangehow", "id": 88258534, "login": "rangehow", "node_id": "MDQ6VXNlcjg4MjU4NTM0", "organizations_url": "https://api.github.com/users/rangehow/orgs", "received_events_url": "https://api.github.com/users/rangehow/received_events", "repos_url": "https://api.github.com/users/rangehow/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rangehow/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rangehow/subscriptions", "type": "User", "url": "https://api.github.com/users/rangehow", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-03-12T07:48:37Z
2025-03-12T07:48:37Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug A dict whose keys are all str, but I get the following error ```python test_data=[{'input_ids':[1,2,3],'labels':[[Counter({2:1})]]}] dataset = datasets.Dataset.from_list(test_data) ``` ```bash pyarrow.lib.ArrowTypeError: Expected dict key of type str or bytes, got 'int' ``` ### Steps to reproduce the bug . ### Expected behavior . ### Environment info datasets 3.3.2
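Note that while the top-level keys are strings, the nested `Counter({2: 1})` has int keys, which is what Arrow rejects. A possible workaround (illustrative) is to stringify those keys before building the dataset:

```python
from collections import Counter
import datasets

counts = Counter({2: 1})
test_data = [{
    "input_ids": [1, 2, 3],
    # Convert the Counter's int keys to strings so Arrow can build a struct.
    "labels": [[{str(k): v for k, v in counts.items()}]],
}]
dataset = datasets.Dataset.from_list(test_data)
print(dataset.features)
```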
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7446/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7446/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7445
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7445/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7445/comments
https://api.github.com/repos/huggingface/datasets/issues/7445/events
https://github.com/huggingface/datasets/pull/7445
2,911,507,923
PR_kwDODunzps6ONygU
7,445
Fix small bugs with async map
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-11T18:30:57Z
2025-03-13T10:38:03Z
2025-03-13T10:37:58Z
MEMBER
null
null
null
helpful for the next PR to enable parallel image/audio/video decoding and make multimodal datasets go brr (e.g. for lerobot) - fix with_indices - fix resuming with save_state_dict() / load_state_dict() - omg that wasn't easy - remove unnecessary decoding in map() to enable parallelism in FormattedExampleIterable later small bonus: keeping features in batch()
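For reference, a minimal sketch of the async `map` + `with_indices` combination this PR fixes, assuming the post-fix behavior (function and column names are illustrative):

```python
import asyncio
from datasets import Dataset

ds = Dataset.from_dict({"x": [1, 2, 3]}).to_iterable_dataset()

async def add_index(example, idx):
    await asyncio.sleep(0)  # stand-in for real async work (e.g. a web request)
    return {"x": example["x"], "idx": idx}

ds = ds.map(add_index, with_indices=True)
print(list(ds))  # expected: [{'x': 1, 'idx': 0}, {'x': 2, 'idx': 1}, {'x': 3, 'idx': 2}]
```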
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7445/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7445/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7445.diff", "html_url": "https://github.com/huggingface/datasets/pull/7445", "merged_at": "2025-03-13T10:37:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/7445.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7445" }
https://api.github.com/repos/huggingface/datasets/issues/7444
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7444/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7444/comments
https://api.github.com/repos/huggingface/datasets/issues/7444/events
https://github.com/huggingface/datasets/issues/7444
2,911,202,445
I_kwDODunzps6thWyN
7,444
Excessive warnings when resuming an IterableDataset+buffered shuffle+DDP.
{ "avatar_url": "https://avatars.githubusercontent.com/u/4356534?v=4", "events_url": "https://api.github.com/users/dhruvdcoder/events{/privacy}", "followers_url": "https://api.github.com/users/dhruvdcoder/followers", "following_url": "https://api.github.com/users/dhruvdcoder/following{/other_user}", "gists_url": "https://api.github.com/users/dhruvdcoder/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dhruvdcoder", "id": 4356534, "login": "dhruvdcoder", "node_id": "MDQ6VXNlcjQzNTY1MzQ=", "organizations_url": "https://api.github.com/users/dhruvdcoder/orgs", "received_events_url": "https://api.github.com/users/dhruvdcoder/received_events", "repos_url": "https://api.github.com/users/dhruvdcoder/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dhruvdcoder/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhruvdcoder/subscriptions", "type": "User", "url": "https://api.github.com/users/dhruvdcoder", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-03-11T16:34:39Z
2025-05-13T09:41:03Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I have a large dataset that I shared into 1024 shards and save on the disk during pre-processing. During training, I load the dataset using load_from_disk() and convert it into an iterable dataset, shuffle it and split the shards to different DDP nodes using the recommended method. However, when the training is resumed mid-epoch, I get thousands of identical warning messages: ``` Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. ``` ### Steps to reproduce the bug 1. Run a multi-node training job using the following python script and interrupt the training after a few seconds to save a mid-epoch checkpoint. ```python #!/usr/bin/env python import os import time from typing import Dict, List import torch import lightning as pl from torch.utils.data import DataLoader from datasets import Dataset from datasets.distributed import split_dataset_by_node import datasets from transformers import AutoTokenizer from more_itertools import flatten, chunked from torchdata.stateful_dataloader import StatefulDataLoader from lightning.pytorch.callbacks.on_exception_checkpoint import ( OnExceptionCheckpoint, ) datasets.logging.set_verbosity_debug() def dummy_generator(): # Generate 60 examples: integers from $0$ to $59$ # 64 sequences of different lengths dataset = [ list(range(3, 10)), list(range(10, 15)), list(range(15, 21)), list(range(21, 27)), list(range(27, 31)), list(range(31, 36)), list(range(36, 45)), list(range(45, 50)), ] for i in range(8): for j, ids in enumerate(dataset): yield {"token_ids": [idx + i * 50 for idx in ids]} def group_texts( examples: Dict[str, List[List[int]]], block_size: int, eos_token_id: int, bos_token_id: int, pad_token_id: int, ) -> Dict[str, List[List[int]]]: real_block_size = block_size - 2 # make space for bos and eos # colapse the sequences into a single list of tokens and then create blocks of real_block_size input_ids = [] attention_mask = [] for block in chunked(flatten(examples["token_ids"]), real_block_size): s = [bos_token_id] + list(block) + [eos_token_id] ls = len(s) attn = [True] * ls s += [pad_token_id] * (block_size - ls) attn += [False] * (block_size - ls) input_ids.append(s) attention_mask.append(attn) return {"input_ids": input_ids, "attention_mask": attention_mask} def collate_fn(batch): return { "input_ids": torch.tensor( [item["input_ids"] for item in batch], dtype=torch.long ), "attention_mask": torch.tensor( [item["attention_mask"] for item in batch], dtype=torch.long ), } class DummyModule(pl.LightningModule): def __init__(self): super().__init__() # A dummy linear layer (not used for actual computation) self.layer = torch.nn.Linear(1, 1) self.ds = None self.prepare_data_per_node = False def on_train_start(self): # This hook is called once training begins on each process. print(f"[Rank {self.global_rank}] Training started.", flush=True) self.data_file = open(f"data_{self.global_rank}.txt", "w") def on_train_end(self): self.data_file.close() def training_step(self, batch, batch_idx): # Print batch information to verify data loading. 
time.sleep(5) # print("batch", batch, flush=True) print( f"\n[Rank {self.global_rank}] Training step, epoch {self.trainer.current_epoch}, batch {batch_idx}: {batch['input_ids']}", flush=True, ) self.data_file.write( f"[Rank {self.global_rank}] Training step, epoch {self.trainer.current_epoch}, batch {batch_idx}: {batch['input_ids']}\n" ) # Compute a dummy loss (here, simply a constant tensor) loss = torch.tensor(0.0, requires_grad=True) return loss def on_train_epoch_start(self): epoch = self.trainer.current_epoch print( f"[Rank {self.global_rank}] Training epoch {epoch} started.", flush=True, ) self.data_file.write( f"[Rank {self.global_rank}] Training epoch {epoch} started.\n" ) def configure_optimizers(self): # Return a dummy optimizer. return torch.optim.SGD(self.parameters(), lr=0.001) class DM(pl.LightningDataModule): def __init__(self): super().__init__() self.ds = None self.prepare_data_per_node = False def set_epoch(self, epoch: int): self.ds.set_epoch(epoch) def prepare_data(self): # download the dataset dataset = Dataset.from_generator(dummy_generator) # save the dataset dataset.save_to_disk("dataset", num_shards=4) def setup(self, stage: str): # load the dataset ds = datasets.load_from_disk("dataset").to_iterable_dataset( num_shards=4 ) ds = ds.map( group_texts, batched=True, batch_size=5, fn_kwargs={ "block_size": 5, "eos_token_id": 1, "bos_token_id": 0, "pad_token_id": 2, }, remove_columns=["token_ids"], ).shuffle(seed=42, buffer_size=8) ds = split_dataset_by_node( ds, rank=self.trainer.global_rank, world_size=self.trainer.world_size, ) self.ds = ds def train_dataloader(self): print( f"[Rank {self.trainer.global_rank}] Preparing train_dataloader...", flush=True, ) rank = self.trainer.global_rank print( f"[Rank {rank}] Global rank: {self.trainer.global_rank}", flush=True, ) world_size = self.trainer.world_size print(f"[Rank {rank}] World size: {world_size}", flush=True) return StatefulDataLoader( self.ds, batch_size=2, num_workers=2, collate_fn=collate_fn, drop_last=True, persistent_workers=True, ) if __name__ == "__main__": print("Starting Lightning training", flush=True) # Optionally, print some SLURM environment info for debugging. print(f"SLURM_NNODES: {os.environ.get('SLURM_NNODES', '1')}", flush=True) # Determine the number of nodes from SLURM (defaulting to 1 if not set) num_nodes = int(os.environ.get("SLURM_NNODES", "1")) model = DummyModule() dm = DM() on_exception = OnExceptionCheckpoint( dirpath="checkpoints", filename="on_exception", ) # Configure the Trainer to use distributed data parallel (DDP). trainer = pl.Trainer( accelerator="gpu" if torch.cuda.is_available() else "cpu", devices=1, strategy=( "ddp" if num_nodes > 1 else "auto" ), # Use DDP strategy for multi-node training. num_nodes=num_nodes, max_epochs=2, logger=False, enable_checkpointing=True, num_sanity_val_steps=0, enable_progress_bar=False, callbacks=[on_exception], ) # resume (uncomment to resume) # trainer.fit(model, datamodule=dm, ckpt_path="checkpoints/on_exception.ckpt") # train trainer.fit(model, datamodule=dm) ``` ```bash #!/bin/bash #SBATCH --job-name=pl_ddp_test #SBATCH --nodes=2 # Adjust number of nodes as needed #SBATCH --ntasks-per-node=1 # One GPU (process) per node #SBATCH --cpus-per-task=3 # At least as many dataloader workers as required #SBATCH --gres=gpu:1 # Request one GPU per node #SBATCH --time=00:10:00 # Job runtime (adjust as needed) #SBATCH --partition=gpu-preempt # Partition or queue name #SBATCH -o script.out # Disable Python output buffering. 
export PYTHONUNBUFFERED=1 echo "SLURM job starting on $(date)" echo "Running on nodes: $SLURM_NODELIST" echo "Current directory: $(pwd)" ls -l # Launch the script using srun so that each process starts the Lightning module. srun script.py ``` 2. Uncomment the "resume" line (second to last) and comment the original `trainer.fit` call (last line). It will produce the following log. ``` [Rank 0] Preparing train_dataloader... [Rank 0] Global rank: 0 [Rank 0] World size: 2 [Rank 1] Preparing train_dataloader... [Rank 1] Global rank: 1 [Rank 1] World size: 2 Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Assigning 2 shards (or data sources) of the dataset to each node. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. node#0 dataloader worker#1, ': Starting to iterate over 1/2 shards. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. node#0 dataloader worker#0, ': Starting to iterate over 1/2 shards. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Set __getitem__(key) output type to arrow for no columns (when key is int or slice) and don't output other (un-formatted) columns. Set __getitem__(key) output type to arrow for no columns (when key is int or slice) and don't output other (un-formatted) columns. node#0 dataloader worker#1, ': Finished iterating over 1/1 shards. node#0 dataloader worker#0, ': Finished iterating over 1/1 shards. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. [Rank 0] Training started. [Rank 0] Training epoch 0 started. [Rank 0] Training epoch 1 started. Assigning 2 shards (or data sources) of the dataset to each node. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. node#0 dataloader worker#1, ': Starting to iterate over 1/2 shards. node#0 dataloader worker#0, ': Starting to iterate over 1/2 shards. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. 
Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. node#1 dataloader worker#1, ': Starting to iterate over 1/2 shards. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. node#1 dataloader worker#0, ': Starting to iterate over 1/2 shards. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Set __getitem__(key) output type to arrow for no columns (when key is int or slice) and don't output other (un-formatted) columns. Set __getitem__(key) output type to arrow for no columns (when key is int or slice) and don't output other (un-formatted) columns. node#0 dataloader worker#1, ': Finished iterating over 1/1 shards. node#0 dataloader worker#0, ': Finished iterating over 1/1 shards. `Trainer.fit` stopped: `max_epochs=2` reached. Set __getitem__(key) output type to arrow for no columns (when key is int or slice) and don't output other (un-formatted) columns. Set __getitem__(key) output type to arrow for no columns (when key is int or slice) and don't output other (un-formatted) columns. node#1 dataloader worker#1, ': Finished iterating over 1/1 shards. node#1 dataloader worker#0, ': Finished iterating over 1/1 shards. [Rank 1] Training started. [Rank 1] Training epoch 0 started. [Rank 1] Training epoch 1 started. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. node#1 dataloader worker#1, ': Starting to iterate over 1/2 shards. node#1 dataloader worker#0, ': Starting to iterate over 1/2 shards. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples. Set __getitem__(key) output type to arrow for no columns (when key is int or slice) and don't output other (un-formatted) columns. Set __getitem__(key) output type to arrow for no columns (when key is int or slice) and don't output other (un-formatted) columns. node#1 dataloader worker#0, ': Finished iterating over 1/1 shards. node#1 dataloader worker#1, ': Finished iterating over 1/1 shards. ``` I'm also attaching the relevant state_dict to make sure that the state is being checkpointed as expected. 
``` {'_iterator_finished': True, '_snapshot': {'_last_yielded_worker_id': 1, '_main_snapshot': {'_IterableDataset_len_called': None, '_base_seed': 3992758080362545099, '_index_sampler_state': {'samples_yielded': 64}, '_num_workers': 2, '_sampler_iter_state': None, '_sampler_iter_yielded': 32, '_shared_seed': None}, '_snapshot_step': 32, '_worker_snapshots': {'worker_0': {'dataset_state': {'ex_iterable': {'shard_example_idx': 0, 'shard_idx': 1}, 'num_examples_since_previous_state': 0, 'previous_state': {'shard_example_idx': 0, 'shard_idx': 1}, 'previous_state_example_idx': 33}, 'fetcher_state': {'dataset_iter_state': None, 'fetcher_ended': False}, 'worker_id': 0}, 'worker_1': {'dataset_state': {'ex_iterable': {'shard_example_idx': 0, 'shard_idx': 1}, 'num_examples_since_previous_state': 0, 'previous_state': {'shard_example_idx': 0, 'shard_idx': 1}, 'previous_state_example_idx': 33}, 'fetcher_state': {'dataset_iter_state': None, 'fetcher_ended': False}, 'worker_id': 1}}}, '_steps_since_snapshot': 0} ``` ### Expected behavior Since I'm following all the recommended steps, I don't expect to see any warning when resuming. Am I doing something wrong? Also, can someone explain why I'm seeing 20 identical messages in the log in this reproduction setting? I'm trying to understand why I see thousands of these messages with the actual dataset. One more surprising thing I noticed in the logs is the change in a number of shards per worker. In the following messages, the denominator changes from 2 to 1. ``` node#1 dataloader worker#1, ': Starting to iterate over 1/2 shards. ... node#1 dataloader worker#1, ': Finished iterating over 1/1 shards. ``` ### Environment info python: 3.11.10 datasets: 3.3.2 lightning: 2.3.1
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7444/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7444/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7443
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7443/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7443/comments
https://api.github.com/repos/huggingface/datasets/issues/7443/events
https://github.com/huggingface/datasets/issues/7443
2,908,585,656
I_kwDODunzps6tXX64
7,443
index error when num_shards > len(dataset)
{ "avatar_url": "https://avatars.githubusercontent.com/u/17934496?v=4", "events_url": "https://api.github.com/users/eminorhan/events{/privacy}", "followers_url": "https://api.github.com/users/eminorhan/followers", "following_url": "https://api.github.com/users/eminorhan/following{/other_user}", "gists_url": "https://api.github.com/users/eminorhan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/eminorhan", "id": 17934496, "login": "eminorhan", "node_id": "MDQ6VXNlcjE3OTM0NDk2", "organizations_url": "https://api.github.com/users/eminorhan/orgs", "received_events_url": "https://api.github.com/users/eminorhan/received_events", "repos_url": "https://api.github.com/users/eminorhan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/eminorhan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eminorhan/subscriptions", "type": "User", "url": "https://api.github.com/users/eminorhan", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-03-10T22:40:59Z
2025-03-10T23:43:08Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
In `ds.push_to_hub()` and `ds.save_to_disk()`, `num_shards` must be smaller than or equal to the number of rows in the dataset, but currently this is not checked anywhere inside these functions. Attempting to invoke these functions with `num_shards > len(dataset)` should raise an informative `ValueError`. I frequently work with datasets with a small number of rows where each row is pretty large, so I often encounter this issue, where the function runs until the shard index in `ds.shard(num_shards, indx)` goes out of bounds. Ideally, a `ValueError` should be raised before reaching this point (i.e. as soon as `ds.push_to_hub()` or `ds.save_to_disk()` is invoked with `num_shards > len(dataset)`). It seems that adding something like: ```python if len(self) < num_shards: raise ValueError(f"num_shards ({num_shards}) must be smaller than or equal to the number of rows in the dataset ({len(self)}). Please either reduce num_shards or increase max_shard_size to make sure num_shards <= len(dataset).") ``` to the beginning of the definition of the `ds.shard()` function [here](https://github.com/huggingface/datasets/blob/f693f4e93aabafa878470c80fd42ddb10ec550d6/src/datasets/arrow_dataset.py#L4728) would deal with this issue for both `ds.push_to_hub()` and `ds.save_to_disk()`, but I'm not exactly sure if this is the best place to raise the `ValueError` (it seems that a more correct way to do it would be to write separate checks for `ds.push_to_hub()` and `ds.save_to_disk()`). I'd be happy to submit a PR if you think something along these lines would be acceptable.
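Until such a check lands, a user-side guard like the following avoids the late out-of-bounds failure the issue describes (directory name and shard count are illustrative):

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(4))})  # few rows, each large in practice
desired_shards = 16
# Guard: never request more shards than there are rows.
num_shards = min(desired_shards, len(ds))
ds.save_to_disk("my_dataset_dir", num_shards=num_shards)
```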
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7443/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7443/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7442
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7442/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7442/comments
https://api.github.com/repos/huggingface/datasets/issues/7442/events
https://github.com/huggingface/datasets/issues/7442
2,905,543,017
I_kwDODunzps6tLxFp
7,442
Flexible Loader
{ "avatar_url": "https://avatars.githubusercontent.com/u/13894030?v=4", "events_url": "https://api.github.com/users/dipta007/events{/privacy}", "followers_url": "https://api.github.com/users/dipta007/followers", "following_url": "https://api.github.com/users/dipta007/following{/other_user}", "gists_url": "https://api.github.com/users/dipta007/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dipta007", "id": 13894030, "login": "dipta007", "node_id": "MDQ6VXNlcjEzODk0MDMw", "organizations_url": "https://api.github.com/users/dipta007/orgs", "received_events_url": "https://api.github.com/users/dipta007/received_events", "repos_url": "https://api.github.com/users/dipta007/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dipta007/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dipta007/subscriptions", "type": "User", "url": "https://api.github.com/users/dipta007", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
3
2025-03-09T16:55:03Z
2025-03-27T23:58:17Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request Can we have a utility function that will use `load_from_disk` when given the local path and `load_dataset` if given an HF dataset? It can be something as simple as this one: ``` def load_hf_dataset(path_or_name): if os.path.exists(path_or_name): return load_from_disk(path_or_name) else: return load_dataset(path_or_name) ``` ### Motivation This can be done inside the user codebase, too, but in my experience, it becomes repetitive code. ### Your contribution I can open a pull request.
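A self-contained version of the helper sketched in the request, with the imports it needs (the function name and path-check logic are the requester's; usage paths are illustrative):

```python
import os
from datasets import load_dataset, load_from_disk

def load_hf_dataset(path_or_name):
    """Load from a local save_to_disk directory if it exists, otherwise from the Hub."""
    if os.path.exists(path_or_name):
        return load_from_disk(path_or_name)
    return load_dataset(path_or_name)

# Example usage (illustrative names):
# ds = load_hf_dataset("/data/my_saved_dataset")  # local directory
# ds = load_hf_dataset("imdb")                    # Hub dataset
```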
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7442/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7442/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7441
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7441/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7441/comments
https://api.github.com/repos/huggingface/datasets/issues/7441/events
https://github.com/huggingface/datasets/issues/7441
2,904,702,329
I_kwDODunzps6tIj15
7,441
`drop_last_batch` does not drop the last batch using IterableDataset + interleave_datasets + multi_worker
{ "avatar_url": "https://avatars.githubusercontent.com/u/4197249?v=4", "events_url": "https://api.github.com/users/memray/events{/privacy}", "followers_url": "https://api.github.com/users/memray/followers", "following_url": "https://api.github.com/users/memray/following{/other_user}", "gists_url": "https://api.github.com/users/memray/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/memray", "id": 4197249, "login": "memray", "node_id": "MDQ6VXNlcjQxOTcyNDk=", "organizations_url": "https://api.github.com/users/memray/orgs", "received_events_url": "https://api.github.com/users/memray/received_events", "repos_url": "https://api.github.com/users/memray/repos", "site_admin": false, "starred_url": "https://api.github.com/users/memray/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/memray/subscriptions", "type": "User", "url": "https://api.github.com/users/memray", "user_view_type": "public" }
[]
open
false
null
[]
null
2
2025-03-08T10:28:44Z
2025-03-09T21:27:33Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug See the script below `drop_last_batch=True` is defined using map() for each dataset. The last batch for each dataset is expected to be dropped, id 21-25. The code behaves as expected when num_workers=0 or 1. When using num_workers>1, 'a-11', 'b-11', 'a-12', 'b-12' are gone and instead 21 and 22 are sampled. ### Steps to reproduce the bug ``` from datasets import Dataset from datasets import interleave_datasets from torch.utils.data import DataLoader def convert_to_str(batch, dataset_name): batch['a'] = [f"{dataset_name}-{e}" for e in batch['a']] return batch def gen1(): for ii in range(1, 25): yield {"a": ii} def gen2(): for ii in range(1, 25): yield {"a": ii} # https://github.com/huggingface/datasets/issues/6565 if __name__ == '__main__': dataset1 = Dataset.from_generator(gen1).to_iterable_dataset(num_shards=2) dataset2 = Dataset.from_generator(gen2).to_iterable_dataset(num_shards=2) dataset1 = dataset1.map(lambda x: convert_to_str(x, dataset_name="a"), batched=True, batch_size=10, drop_last_batch=True) dataset2 = dataset2.map(lambda x: convert_to_str(x, dataset_name="b"), batched=True, batch_size=10, drop_last_batch=True) interleaved = interleave_datasets([dataset1, dataset2], stopping_strategy="all_exhausted") print(f"num_workers=0") loader = DataLoader(interleaved, batch_size=5, num_workers=0) i = 0 for b in loader: print(i, b['a']) i += 1 print('=-' * 20) print(f"num_workers=1") loader = DataLoader(interleaved, batch_size=5, num_workers=1) i = 0 for b in loader: print(i, b['a']) i += 1 print('=-' * 20) print(f"num_workers=2") loader = DataLoader(interleaved, batch_size=5, num_workers=2) i = 0 for b in loader: print(i, b['a']) i += 1 print('=-' * 20) print(f"num_workers=3") loader = DataLoader(interleaved, batch_size=5, num_workers=3) i = 0 for b in loader: print(i, b['a']) i += 1 ``` output is: ``` num_workers=0 0 ['a-1', 'b-1', 'a-2', 'b-2', 'a-3'] 1 ['b-3', 'a-4', 'b-4', 'a-5', 'b-5'] 2 ['a-6', 'b-6', 'a-7', 'b-7', 'a-8'] 3 ['b-8', 'a-9', 'b-9', 'a-10', 'b-10'] 4 ['a-11', 'b-11', 'a-12', 'b-12', 'a-13'] 5 ['b-13', 'a-14', 'b-14', 'a-15', 'b-15'] 6 ['a-16', 'b-16', 'a-17', 'b-17', 'a-18'] 7 ['b-18', 'a-19', 'b-19', 'a-20', 'b-20'] =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- num_workers=1 0 ['a-1', 'b-1', 'a-2', 'b-2', 'a-3'] 1 ['b-3', 'a-4', 'b-4', 'a-5', 'b-5'] 2 ['a-6', 'b-6', 'a-7', 'b-7', 'a-8'] 3 ['b-8', 'a-9', 'b-9', 'a-10', 'b-10'] 4 ['a-11', 'b-11', 'a-12', 'b-12', 'a-13'] 5 ['b-13', 'a-14', 'b-14', 'a-15', 'b-15'] 6 ['a-16', 'b-16', 'a-17', 'b-17', 'a-18'] 7 ['b-18', 'a-19', 'b-19', 'a-20', 'b-20'] =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- num_workers=2 0 ['a-1', 'b-1', 'a-2', 'b-2', 'a-3'] 1 ['a-13', 'b-13', 'a-14', 'b-14', 'a-15'] 2 ['b-3', 'a-4', 'b-4', 'a-5', 'b-5'] 3 ['b-15', 'a-16', 'b-16', 'a-17', 'b-17'] 4 ['a-6', 'b-6', 'a-7', 'b-7', 'a-8'] 5 ['a-18', 'b-18', 'a-19', 'b-19', 'a-20'] 6 ['b-8', 'a-9', 'b-9', 'a-10', 'b-10'] 7 ['b-20', 'a-21', 'b-21', 'a-22', 'b-22'] =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- num_workers=3 Too many dataloader workers: 3 (max is dataset.num_shards=2). Stopping 1 dataloader workers. 
0 ['a-1', 'b-1', 'a-2', 'b-2', 'a-3'] 1 ['a-13', 'b-13', 'a-14', 'b-14', 'a-15'] 2 ['b-3', 'a-4', 'b-4', 'a-5', 'b-5'] 3 ['b-15', 'a-16', 'b-16', 'a-17', 'b-17'] 4 ['a-6', 'b-6', 'a-7', 'b-7', 'a-8'] 5 ['a-18', 'b-18', 'a-19', 'b-19', 'a-20'] 6 ['b-8', 'a-9', 'b-9', 'a-10', 'b-10'] 7 ['b-20', 'a-21', 'b-21', 'a-22', 'b-22'] ``` ### Expected behavior `'a-21', 'b-21', 'a-22', 'b-22'` should be dropped ### Environment info - `datasets` version: 3.3.2 - Platform: Linux-5.15.0-1056-aws-x86_64-with-glibc2.31 - Python version: 3.10.16 - `huggingface_hub` version: 0.28.0 - PyArrow version: 19.0.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.6.1
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7441/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7441/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7440
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7440/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7440/comments
https://api.github.com/repos/huggingface/datasets/issues/7440/events
https://github.com/huggingface/datasets/issues/7440
2,903,740,662
I_kwDODunzps6tE5D2
7,440
IterableDataset raises FileNotFoundError instead of retrying
{ "avatar_url": "https://avatars.githubusercontent.com/u/145220868?v=4", "events_url": "https://api.github.com/users/bauwenst/events{/privacy}", "followers_url": "https://api.github.com/users/bauwenst/followers", "following_url": "https://api.github.com/users/bauwenst/following{/other_user}", "gists_url": "https://api.github.com/users/bauwenst/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/bauwenst", "id": 145220868, "login": "bauwenst", "node_id": "U_kgDOCKflBA", "organizations_url": "https://api.github.com/users/bauwenst/orgs", "received_events_url": "https://api.github.com/users/bauwenst/received_events", "repos_url": "https://api.github.com/users/bauwenst/repos", "site_admin": false, "starred_url": "https://api.github.com/users/bauwenst/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bauwenst/subscriptions", "type": "User", "url": "https://api.github.com/users/bauwenst", "user_view_type": "public" }
[]
open
false
null
[]
null
6
2025-03-07T19:14:18Z
2025-04-17T23:40:35Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug In https://github.com/huggingface/datasets/issues/6843 it was noted that the streaming feature of `datasets` is highly susceptible to outages and doesn't back off for long (or even *at all*). I was training a model while streaming SlimPajama and training crashed with a `FileNotFoundError`. I can only assume that this was due to a momentary outage considering the file in question, `train/chunk9/example_train_3889.jsonl.zst`, [exists like all other files in SlimPajama](https://huggingface.co/datasets/cerebras/SlimPajama-627B/blob/main/train/chunk9/example_train_3889.jsonl.zst). ```python ... File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 2226, in __iter__ for key, example in ex_iterable: File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1499, in __iter__ for x in self.ex_iterable: File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1067, in __iter__ yield from self._iter() File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1231, in _iter for key, transformed_example in iter_outputs(): File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1207, in iter_outputs for i, key_example in inputs_iterator: File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1111, in iter_inputs for key, example in iterator: File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 371, in __iter__ for key, pa_table in self.generate_tables_fn(**gen_kwags): File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/packaged_modules/json/json.py", line 99, in _generate_tables for file_idx, file in enumerate(itertools.chain.from_iterable(files)): File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/utils/track.py", line 50, in __iter__ for x in self.generator(*self.args): File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/utils/file_utils.py", line 1378, in _iter_from_urlpaths raise FileNotFoundError(urlpath) FileNotFoundError: zstd://example_train_3889.jsonl::hf://datasets/cerebras/SlimPajama-627B@2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/train/chunk9/example_train_3889.jsonl.zst ``` That final `raise` is at the bottom of the following snippet: https://github.com/huggingface/datasets/blob/f693f4e93aabafa878470c80fd42ddb10ec550d6/src/datasets/utils/file_utils.py#L1354-L1379 So clearly, something choked up in `xisfile`. ### Steps to reproduce the bug This happens when streaming a dataset and iterating over it. In my case, that iteration is done in Trainer's `inner_training_loop`, but this is not relevant to the iterator. ```python File "/miniconda3/envs/draft/lib/python3.11/site-packages/accelerate/data_loader.py", line 835, in __iter__ next_batch, next_batch_info = self._fetch_batches(main_iterator) ``` ### Expected behavior This bug and the linked issue have one thing in common: *when streaming fails to retrieve an example, the entire program gives up and crashes*. As users, we cannot even protect ourselves from this: when we are iterating over a dataset, we can't make `datasets` skip over a bad example or wait a little longer to retry the iteration, because when a Python generator/iterator raises an error, it loses all its context. 
In other words: if you have something that looks like `for b in a: for c in b: for d in c:`, errors in the innermost loop can only be caught by a `try ... except` in `c.__iter__()`. There should be such exception handling in `datasets` and it should have a **configurable exponential back-off**: first wait and retry after 1 minute, then 2 minutes, then 4 minutes, then 8 minutes, ... and after a given amount of retries, **skip the bad example**, and **only after** skipping a given amount of examples, give up and crash. This was requested in https://github.com/huggingface/datasets/issues/6843 too, since currently there is only linear backoff *and* it is clearly not applied to `xisfile`. ### Environment info - `datasets` version: 3.3.2 *(the latest version)* - Platform: Linux-4.18.0-513.24.1.el8_9.x86_64-x86_64-with-glibc2.28 - Python version: 3.11.7 - `huggingface_hub` version: 0.26.5 - PyArrow version: 15.0.0 - Pandas version: 2.2.0 - `fsspec` version: 2024.10.0
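The requested behavior (exponential back-off, then skip, then give up) is not available in `datasets` today. A rough user-side sketch of the back-off part follows, with the caveat that restarting iteration of a streaming dataset loses the iterator position, so this only illustrates the retry policy, not true resumption:

```python
import time

def iterate_with_backoff(dataset, max_retries=5, base_delay=60):
    """Restart iteration with exponential back-off on transient streaming errors.
    Illustrative only: each restart begins again from the start of the stream."""
    for attempt in range(max_retries + 1):
        try:
            for example in dataset:
                yield example
            return
        except FileNotFoundError:
            if attempt == max_retries:
                raise
            time.sleep(base_delay * 2 ** attempt)
```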
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7440/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7440/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7439
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7439/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7439/comments
https://api.github.com/repos/huggingface/datasets/issues/7439/events
https://github.com/huggingface/datasets/pull/7439
2,900,143,289
PR_kwDODunzps6NoCdD
7,439
Fix multi gpu process example
{ "avatar_url": "https://avatars.githubusercontent.com/u/46050679?v=4", "events_url": "https://api.github.com/users/SwayStar123/events{/privacy}", "followers_url": "https://api.github.com/users/SwayStar123/followers", "following_url": "https://api.github.com/users/SwayStar123/following{/other_user}", "gists_url": "https://api.github.com/users/SwayStar123/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SwayStar123", "id": 46050679, "login": "SwayStar123", "node_id": "MDQ6VXNlcjQ2MDUwNjc5", "organizations_url": "https://api.github.com/users/SwayStar123/orgs", "received_events_url": "https://api.github.com/users/SwayStar123/received_events", "repos_url": "https://api.github.com/users/SwayStar123/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SwayStar123/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SwayStar123/subscriptions", "type": "User", "url": "https://api.github.com/users/SwayStar123", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-03-06T11:29:19Z
2025-03-06T17:07:28Z
2025-03-06T17:06:38Z
NONE
null
null
null
`to` is not an in-place function. But I am not sure about this code anyway; I think it modifies the global variable `model` every time the function is called, which is on every batch. So it is juggling the same model across every GPU, right? Isn't that very inefficient?
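For illustration of the change being proposed, a minimal sketch of the pattern (names are hypothetical, not the exact docs example): assign the result of `.to()` inside the per-rank function rather than relying on mutation of a module defined at module level.

```python
import torch

model = torch.nn.Linear(1, 1)

def gpu_computation(batch, rank):
    # Pick a device from the rank and keep the module returned by .to().
    if torch.cuda.is_available():
        device = f"cuda:{rank % torch.cuda.device_count()}"
    else:
        device = "cpu"
    moved = model.to(device)
    x = torch.tensor(batch["x"], dtype=torch.float32, device=device).unsqueeze(-1)
    return {"out": moved(x).squeeze(-1).tolist()}

# Typically used as: ds.map(gpu_computation, batched=True, with_rank=True, num_proc=N)
```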
{ "avatar_url": "https://avatars.githubusercontent.com/u/46050679?v=4", "events_url": "https://api.github.com/users/SwayStar123/events{/privacy}", "followers_url": "https://api.github.com/users/SwayStar123/followers", "following_url": "https://api.github.com/users/SwayStar123/following{/other_user}", "gists_url": "https://api.github.com/users/SwayStar123/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SwayStar123", "id": 46050679, "login": "SwayStar123", "node_id": "MDQ6VXNlcjQ2MDUwNjc5", "organizations_url": "https://api.github.com/users/SwayStar123/orgs", "received_events_url": "https://api.github.com/users/SwayStar123/received_events", "repos_url": "https://api.github.com/users/SwayStar123/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SwayStar123/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SwayStar123/subscriptions", "type": "User", "url": "https://api.github.com/users/SwayStar123", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7439/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7439/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7439.diff", "html_url": "https://github.com/huggingface/datasets/pull/7439", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7439.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7439" }
https://api.github.com/repos/huggingface/datasets/issues/7438
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7438/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7438/comments
https://api.github.com/repos/huggingface/datasets/issues/7438/events
https://github.com/huggingface/datasets/pull/7438
2,899,209,484
PR_kwDODunzps6Nk37h
7,438
Allow dataset row indexing with np.int types (#7423)
{ "avatar_url": "https://avatars.githubusercontent.com/u/35470740?v=4", "events_url": "https://api.github.com/users/DavidRConnell/events{/privacy}", "followers_url": "https://api.github.com/users/DavidRConnell/followers", "following_url": "https://api.github.com/users/DavidRConnell/following{/other_user}", "gists_url": "https://api.github.com/users/DavidRConnell/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/DavidRConnell", "id": 35470740, "login": "DavidRConnell", "node_id": "MDQ6VXNlcjM1NDcwNzQw", "organizations_url": "https://api.github.com/users/DavidRConnell/orgs", "received_events_url": "https://api.github.com/users/DavidRConnell/received_events", "repos_url": "https://api.github.com/users/DavidRConnell/repos", "site_admin": false, "starred_url": "https://api.github.com/users/DavidRConnell/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DavidRConnell/subscriptions", "type": "User", "url": "https://api.github.com/users/DavidRConnell", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-03-06T03:10:43Z
2025-03-06T03:10:43Z
null
NONE
null
null
null
@lhoestq Proposed fix for #7423. Added a couple of simple tests as requested. I had some test failures related to Java and pyspark even when installing with dev, but these don't seem to be related to the changes here and fail for me even on a clean main. The TypeError raised when using the wrong type is: "Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable." I think that is fine, but I could modify the int part to something more generic (although I'm not sure what) if wanted.
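A short usage sketch of what the change allows, assuming this fix is merged:

```python
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"x": [10, 20, 30]})

# NumPy integer scalars (e.g. from np.argmax or np.random.randint) can be
# used as row indices directly, just like built-in ints.
idx = np.int64(1)
print(ds[idx])          # {'x': 20}
print(ds[np.int32(2)])  # {'x': 30}
```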
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7438/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7438/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7438.diff", "html_url": "https://github.com/huggingface/datasets/pull/7438", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7438.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7438" }
https://api.github.com/repos/huggingface/datasets/issues/7437
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7437/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7437/comments
https://api.github.com/repos/huggingface/datasets/issues/7437/events
https://github.com/huggingface/datasets/pull/7437
2,899,104,679
PR_kwDODunzps6Nkhla
7,437
Use pyupgrade --py39-plus for remaining files
{ "avatar_url": "https://avatars.githubusercontent.com/u/17618148?v=4", "events_url": "https://api.github.com/users/cyyever/events{/privacy}", "followers_url": "https://api.github.com/users/cyyever/followers", "following_url": "https://api.github.com/users/cyyever/following{/other_user}", "gists_url": "https://api.github.com/users/cyyever/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cyyever", "id": 17618148, "login": "cyyever", "node_id": "MDQ6VXNlcjE3NjE4MTQ4", "organizations_url": "https://api.github.com/users/cyyever/orgs", "received_events_url": "https://api.github.com/users/cyyever/received_events", "repos_url": "https://api.github.com/users/cyyever/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cyyever/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cyyever/subscriptions", "type": "User", "url": "https://api.github.com/users/cyyever", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-03-06T02:12:25Z
2025-04-15T14:47:54Z
null
CONTRIBUTOR
null
null
null
This work follows #7428. It also sets `requires-python` in `pyproject.toml`.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7437/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7437/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7437.diff", "html_url": "https://github.com/huggingface/datasets/pull/7437", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7437.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7437" }
https://api.github.com/repos/huggingface/datasets/issues/7436
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7436/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7436/comments
https://api.github.com/repos/huggingface/datasets/issues/7436/events
https://github.com/huggingface/datasets/pull/7436
2,898,385,725
PR_kwDODunzps6NiArv
7,436
chore: fix typos
{ "avatar_url": "https://avatars.githubusercontent.com/u/35225576?v=4", "events_url": "https://api.github.com/users/afuetterer/events{/privacy}", "followers_url": "https://api.github.com/users/afuetterer/followers", "following_url": "https://api.github.com/users/afuetterer/following{/other_user}", "gists_url": "https://api.github.com/users/afuetterer/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/afuetterer", "id": 35225576, "login": "afuetterer", "node_id": "MDQ6VXNlcjM1MjI1NTc2", "organizations_url": "https://api.github.com/users/afuetterer/orgs", "received_events_url": "https://api.github.com/users/afuetterer/received_events", "repos_url": "https://api.github.com/users/afuetterer/repos", "site_admin": false, "starred_url": "https://api.github.com/users/afuetterer/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/afuetterer/subscriptions", "type": "User", "url": "https://api.github.com/users/afuetterer", "user_view_type": "public" }
[]
closed
false
null
[]
null
0
2025-03-05T20:17:54Z
2025-04-28T14:00:09Z
2025-04-28T13:51:26Z
CONTRIBUTOR
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7436/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7436/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7436.diff", "html_url": "https://github.com/huggingface/datasets/pull/7436", "merged_at": "2025-04-28T13:51:26Z", "patch_url": "https://github.com/huggingface/datasets/pull/7436.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7436" }
https://api.github.com/repos/huggingface/datasets/issues/7435
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7435/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7435/comments
https://api.github.com/repos/huggingface/datasets/issues/7435/events
https://github.com/huggingface/datasets/pull/7435
2,895,536,956
PR_kwDODunzps6NYUnr
7,435
Refactor `string_to_dict` to return `None` if there is no match instead of raising `ValueError`
{ "avatar_url": "https://avatars.githubusercontent.com/u/27844407?v=4", "events_url": "https://api.github.com/users/ringohoffman/events{/privacy}", "followers_url": "https://api.github.com/users/ringohoffman/followers", "following_url": "https://api.github.com/users/ringohoffman/following{/other_user}", "gists_url": "https://api.github.com/users/ringohoffman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ringohoffman", "id": 27844407, "login": "ringohoffman", "node_id": "MDQ6VXNlcjI3ODQ0NDA3", "organizations_url": "https://api.github.com/users/ringohoffman/orgs", "received_events_url": "https://api.github.com/users/ringohoffman/received_events", "repos_url": "https://api.github.com/users/ringohoffman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ringohoffman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ringohoffman/subscriptions", "type": "User", "url": "https://api.github.com/users/ringohoffman", "user_view_type": "public" }
[]
closed
false
null
[]
null
8
2025-03-04T22:01:20Z
2025-03-12T16:52:00Z
2025-03-12T16:52:00Z
NONE
null
null
null
Making this change, as encouraged here: * https://github.com/huggingface/datasets/pull/7434#discussion_r1979933054 Instead of using a `try`-`except` pattern to handle the case where there is no match, we can check whether the return value is `None`; we can also assert that the return value is not `None` when we know that should be true.
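A hedged sketch of the calling pattern this enables; the import path and the placeholder syntax of `string_to_dict` shown here are assumptions, not a guaranteed API.

```python
from datasets.utils.py_utils import string_to_dict  # location assumed

pattern = "data/{split}-{shard}-of-{num_shards}.parquet"

# After the change, a non-matching string returns None instead of raising,
# so callers branch on the result (and can assert when a match is guaranteed).
parsed = string_to_dict("data/train-00000-of-00002.parquet", pattern)
if parsed is not None:
    print(parsed["split"])  # "train"

assert string_to_dict("README.md", pattern) is None
```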
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7435/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7435/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7435.diff", "html_url": "https://github.com/huggingface/datasets/pull/7435", "merged_at": "2025-03-12T16:51:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/7435.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7435" }
https://api.github.com/repos/huggingface/datasets/issues/7434
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7434/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7434/comments
https://api.github.com/repos/huggingface/datasets/issues/7434/events
https://github.com/huggingface/datasets/pull/7434
2,893,075,908
PR_kwDODunzps6NP-vn
7,434
Refactor `Dataset.map` to reuse cache files mapped with different `num_proc`
{ "avatar_url": "https://avatars.githubusercontent.com/u/27844407?v=4", "events_url": "https://api.github.com/users/ringohoffman/events{/privacy}", "followers_url": "https://api.github.com/users/ringohoffman/followers", "following_url": "https://api.github.com/users/ringohoffman/following{/other_user}", "gists_url": "https://api.github.com/users/ringohoffman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ringohoffman", "id": 27844407, "login": "ringohoffman", "node_id": "MDQ6VXNlcjI3ODQ0NDA3", "organizations_url": "https://api.github.com/users/ringohoffman/orgs", "received_events_url": "https://api.github.com/users/ringohoffman/received_events", "repos_url": "https://api.github.com/users/ringohoffman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ringohoffman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ringohoffman/subscriptions", "type": "User", "url": "https://api.github.com/users/ringohoffman", "user_view_type": "public" }
[]
closed
false
null
[]
null
10
2025-03-04T06:12:37Z
2025-05-14T10:45:10Z
2025-05-12T15:14:08Z
NONE
null
null
null
Fixes #7433 This refactor unifies the `num_proc is None or num_proc == 1` and `num_proc > 1` code paths. Instead of handling them completely separately (one uses a list of kwargs and shards, the other just a single set of kwargs and `self`), the `num_proc == 1` case is wrapped in a list so that the only difference is whether or not a pool is used; either case can then load the other's cache files just by changing `num_shards`, and `num_proc == 1` can sequentially load the shards of a dataset mapped with `num_shards > 1` and map any missing shards. Beyond the structural refactor, the main contribution of this PR is `existing_cache_file_map`, which uses a regex built from `cache_file_name` and `suffix_template` to find existing cache files, grouped by their `num_shards`; using this data structure, we can reset `num_shards` to match an existing set of cache files and load them accordingly.
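A rough, simplified sketch of the `existing_cache_file_map` idea, assuming shard cache files are named by inserting a suffix such as `_00003_of_00008` before the extension; the real `suffix_template` handling in the PR is more general than this hard-coded pattern.

```python
import os
import re
from collections import defaultdict


def existing_cache_file_map(cache_file_name):
    """Group existing cache shards by the num_shards value they were written with."""
    base, ext = os.path.splitext(os.path.basename(cache_file_name))
    directory = os.path.dirname(cache_file_name) or "."
    pattern = re.compile(re.escape(base) + r"_(\d{5})_of_(\d{5})" + re.escape(ext) + r"$")
    by_num_shards = defaultdict(list)
    for name in os.listdir(directory):
        match = pattern.fullmatch(name)
        if match:
            by_num_shards[int(match.group(2))].append(os.path.join(directory, name))
    # Only a complete set of shards for a given num_shards can be reused as-is.
    return {n: sorted(files) for n, files in by_num_shards.items() if len(files) == n}
```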
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7434/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7434/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7434.diff", "html_url": "https://github.com/huggingface/datasets/pull/7434", "merged_at": "2025-05-12T15:14:08Z", "patch_url": "https://github.com/huggingface/datasets/pull/7434.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7434" }
https://api.github.com/repos/huggingface/datasets/issues/7433
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7433/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7433/comments
https://api.github.com/repos/huggingface/datasets/issues/7433/events
https://github.com/huggingface/datasets/issues/7433
2,890,240,400
I_kwDODunzps6sRZGQ
7,433
`Dataset.map` ignores existing caches and remaps when ran with different `num_proc`
{ "avatar_url": "https://avatars.githubusercontent.com/u/27844407?v=4", "events_url": "https://api.github.com/users/ringohoffman/events{/privacy}", "followers_url": "https://api.github.com/users/ringohoffman/followers", "following_url": "https://api.github.com/users/ringohoffman/following{/other_user}", "gists_url": "https://api.github.com/users/ringohoffman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ringohoffman", "id": 27844407, "login": "ringohoffman", "node_id": "MDQ6VXNlcjI3ODQ0NDA3", "organizations_url": "https://api.github.com/users/ringohoffman/orgs", "received_events_url": "https://api.github.com/users/ringohoffman/received_events", "repos_url": "https://api.github.com/users/ringohoffman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ringohoffman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ringohoffman/subscriptions", "type": "User", "url": "https://api.github.com/users/ringohoffman", "user_view_type": "public" }
[]
closed
false
null
[]
null
2
2025-03-03T05:51:26Z
2025-05-12T15:14:09Z
2025-05-12T15:14:09Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug If you `map` a dataset and save it to a specific `cache_file_name` with a specific `num_proc`, and then call map again with that same existing `cache_file_name` but a different `num_proc`, the dataset will be re-mapped. ### Steps to reproduce the bug 1. Download a dataset ```python import datasets dataset = datasets.load_dataset("ylecun/mnist") ``` ``` Generating train split: 100%|██████████| 60000/60000 [00:00<00:00, 116429.85 examples/s] Generating test split: 100%|██████████| 10000/10000 [00:00<00:00, 103310.27 examples/s] ``` 2. `map` and cache it with a specific `num_proc` ```python cache_file_name="./cache/train.map" dataset["train"].map(lambda x: x, cache_file_name=cache_file_name, num_proc=2) ``` ``` Map (num_proc=2): 100%|██████████| 60000/60000 [00:01<00:00, 53764.03 examples/s] ``` 3. `map` it with a different `num_proc` and the same `cache_file_name` as before ```python dataset["train"].map(lambda x: x, cache_file_name=cache_file_name, num_proc=3) ``` ``` Map (num_proc=3): 100%|██████████| 60000/60000 [00:00<00:00, 65377.12 examples/s] ``` ### Expected behavior If I specify an existing `cache_file_name`, I don't expect using a different `num_proc` than the one that was used to generate it to cause the dataset to be re-mapped. ### Environment info ```console $ datasets-cli env - `datasets` version: 3.3.2 - Platform: Linux-5.15.0-131-generic-x86_64-with-glibc2.35 - Python version: 3.10.16 - `huggingface_hub` version: 0.29.1 - PyArrow version: 19.0.1 - Pandas version: 2.2.3 - `fsspec` version: 2024.12.0 ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7433/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7433/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7432
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7432/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7432/comments
https://api.github.com/repos/huggingface/datasets/issues/7432/events
https://github.com/huggingface/datasets/pull/7432
2,887,717,289
PR_kwDODunzps6M-DI0
7,432
Fix type annotation
{ "avatar_url": "https://avatars.githubusercontent.com/u/730137?v=4", "events_url": "https://api.github.com/users/NeilGirdhar/events{/privacy}", "followers_url": "https://api.github.com/users/NeilGirdhar/followers", "following_url": "https://api.github.com/users/NeilGirdhar/following{/other_user}", "gists_url": "https://api.github.com/users/NeilGirdhar/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/NeilGirdhar", "id": 730137, "login": "NeilGirdhar", "node_id": "MDQ6VXNlcjczMDEzNw==", "organizations_url": "https://api.github.com/users/NeilGirdhar/orgs", "received_events_url": "https://api.github.com/users/NeilGirdhar/received_events", "repos_url": "https://api.github.com/users/NeilGirdhar/repos", "site_admin": false, "starred_url": "https://api.github.com/users/NeilGirdhar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NeilGirdhar/subscriptions", "type": "User", "url": "https://api.github.com/users/NeilGirdhar", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-28T17:28:20Z
2025-03-04T15:53:03Z
2025-03-04T15:53:03Z
CONTRIBUTOR
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7432/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7432/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7432.diff", "html_url": "https://github.com/huggingface/datasets/pull/7432", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7432.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7432" }
https://api.github.com/repos/huggingface/datasets/issues/7431
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7431/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7431/comments
https://api.github.com/repos/huggingface/datasets/issues/7431/events
https://github.com/huggingface/datasets/issues/7431
2,887,244,074
I_kwDODunzps6sF9kq
7,431
Issues with large Datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/106806889?v=4", "events_url": "https://api.github.com/users/nikitabelooussovbtis/events{/privacy}", "followers_url": "https://api.github.com/users/nikitabelooussovbtis/followers", "following_url": "https://api.github.com/users/nikitabelooussovbtis/following{/other_user}", "gists_url": "https://api.github.com/users/nikitabelooussovbtis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nikitabelooussovbtis", "id": 106806889, "login": "nikitabelooussovbtis", "node_id": "U_kgDOBl2-aQ", "organizations_url": "https://api.github.com/users/nikitabelooussovbtis/orgs", "received_events_url": "https://api.github.com/users/nikitabelooussovbtis/received_events", "repos_url": "https://api.github.com/users/nikitabelooussovbtis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nikitabelooussovbtis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nikitabelooussovbtis/subscriptions", "type": "User", "url": "https://api.github.com/users/nikitabelooussovbtis", "user_view_type": "public" }
[]
open
false
null
[]
null
4
2025-02-28T14:05:22Z
2025-03-04T15:02:26Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug If the COCO annotation file is too large, the dataset will not be able to load it. I am not entirely sure where the issue is, but I am guessing it is due to the code trying to load it all as one line into a dataframe. This was for object detection. My current workaround is the following code, but it would be nice to be able to do it without worrying about this, and there is probably a better way of doing it: ```python dataset_dict = json.load(open("./local_data/annotations/train.json")) df = pd.DataFrame(columns=['images', 'annotations', 'categories']) df = df._append({'images': dataset_dict['images'], 'annotations': dataset_dict['annotations'], 'categories': dataset_dict['categories']}, ignore_index=True) train = Dataset.from_pandas(df) dataset_dict = json.load(open("./local_data/annotations/validation.json")) df = pd.DataFrame(columns=['images', 'annotations', 'categories']) df = df._append({'images': dataset_dict['images'], 'annotations': dataset_dict['annotations'], 'categories': dataset_dict['categories']}, ignore_index=True) val = Dataset.from_pandas(df) dataset_dict = json.load(open("./local_data/annotations/test.json")) df = pd.DataFrame(columns=['images', 'annotations', 'categories']) df = df._append({'images': dataset_dict['images'], 'annotations': dataset_dict['annotations'], 'categories': dataset_dict['categories']}, ignore_index=True) test = Dataset.from_pandas(df) dataset = DatasetDict({'train': train, 'validation': val, 'test': test}) ``` ### Steps to reproduce the bug 1) Set up a directory with the JSON files in COCO format: -local_data |-images |---1.jpg |---2.jpg |---.... |---n.jpg |-annotations |---test.json |---train.json |---validation.json 2) Try to load local_data into a dataset; if the annotation file is larger than about 300 kB, it will cause an error. ### Expected behavior That it loads the JSON files, preferably in the same format as it does for smaller files. ### Environment info - `datasets` version: 3.3.3.dev0 - Platform: Linux-6.11.0-17-generic-x86_64-with-glibc2.39 - Python version: 3.12.3 - `huggingface_hub` version: 0.29.0 - PyArrow version: 19.0.1 - Pandas version: 2.2.3 - `fsspec` version: 2024.12.0
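For reference, a slightly tidier variant of the same workaround, sketched under the assumption that each annotation file fits in memory; it avoids the private `DataFrame._append` by building the single-row dataset directly.

```python
import json

from datasets import Dataset, DatasetDict


def load_coco_split(path):
    # Wrap the whole COCO annotation file in a one-row dataset,
    # mirroring the pandas-based workaround above.
    with open(path) as f:
        d = json.load(f)
    return Dataset.from_dict(
        {
            "images": [d["images"]],
            "annotations": [d["annotations"]],
            "categories": [d["categories"]],
        }
    )


dataset = DatasetDict(
    {
        "train": load_coco_split("./local_data/annotations/train.json"),
        "validation": load_coco_split("./local_data/annotations/validation.json"),
        "test": load_coco_split("./local_data/annotations/test.json"),
    }
)
```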
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7431/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7431/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7430
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7430/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7430/comments
https://api.github.com/repos/huggingface/datasets/issues/7430/events
https://github.com/huggingface/datasets/issues/7430
2,886,922,573
I_kwDODunzps6sEvFN
7,430
Error in code "Time to slice and dice" from course "NLP Course"
{ "avatar_url": "https://avatars.githubusercontent.com/u/122965300?v=4", "events_url": "https://api.github.com/users/Yurkmez/events{/privacy}", "followers_url": "https://api.github.com/users/Yurkmez/followers", "following_url": "https://api.github.com/users/Yurkmez/following{/other_user}", "gists_url": "https://api.github.com/users/Yurkmez/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Yurkmez", "id": 122965300, "login": "Yurkmez", "node_id": "U_kgDOB1RNNA", "organizations_url": "https://api.github.com/users/Yurkmez/orgs", "received_events_url": "https://api.github.com/users/Yurkmez/received_events", "repos_url": "https://api.github.com/users/Yurkmez/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Yurkmez/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Yurkmez/subscriptions", "type": "User", "url": "https://api.github.com/users/Yurkmez", "user_view_type": "public" }
[]
closed
false
null
[]
null
2
2025-02-28T11:36:10Z
2025-03-05T11:32:47Z
2025-03-03T17:52:15Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug When we execute the code ```python frequencies = ( train_df["condition"] .value_counts() .to_frame() .reset_index() .rename(columns={"index": "condition", "condition": "frequency"}) ) frequencies.head() ``` the answer should look like this: condition | frequency birth control | 27655 depression | 8023 acne | 5209 anxiety | 4991 pain | 4744 but it is different: frequency | count birth control | 27655 depression | 8023 acne | 5209 anxiety | 4991 pain | 4744 This is not correct; the correct code is ```python frequencies = ( train_df["condition"] .value_counts() .to_frame() .reset_index() .rename(columns={"index": "condition", "count": "frequency"}) ) ``` ### Steps to reproduce the bug ```python frequencies = ( train_df["condition"] .value_counts() .to_frame() .reset_index() .rename(columns={"index": "condition", "condition": "frequency"}) ) frequencies.head() ``` ### Expected behavior condition | frequency birth control | 27655 depression | 8023 acne | 5209 anxiety | 4991 pain | 4744 ### Environment info Google Colab
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7430/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7430/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7429
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7429/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7429/comments
https://api.github.com/repos/huggingface/datasets/issues/7429/events
https://github.com/huggingface/datasets/pull/7429
2,886,806,513
PR_kwDODunzps6M67Jd
7,429
Improved type annotation
{ "avatar_url": "https://avatars.githubusercontent.com/u/45285915?v=4", "events_url": "https://api.github.com/users/saiden89/events{/privacy}", "followers_url": "https://api.github.com/users/saiden89/followers", "following_url": "https://api.github.com/users/saiden89/following{/other_user}", "gists_url": "https://api.github.com/users/saiden89/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/saiden89", "id": 45285915, "login": "saiden89", "node_id": "MDQ6VXNlcjQ1Mjg1OTE1", "organizations_url": "https://api.github.com/users/saiden89/orgs", "received_events_url": "https://api.github.com/users/saiden89/received_events", "repos_url": "https://api.github.com/users/saiden89/repos", "site_admin": false, "starred_url": "https://api.github.com/users/saiden89/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/saiden89/subscriptions", "type": "User", "url": "https://api.github.com/users/saiden89", "user_view_type": "public" }
[]
open
false
null
[]
null
3
2025-02-28T10:39:10Z
2025-05-15T12:27:17Z
null
NONE
null
null
null
I've refined several type annotations throughout the codebase to align with current best practices and enhance overall clarity. Given the complexity of the code, there may still be areas that need further attention. I welcome any feedback or suggestions to make these improvements even better. - Fixes #7202
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 2, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/7429/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7429/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7429.diff", "html_url": "https://github.com/huggingface/datasets/pull/7429", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7429.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7429" }
https://api.github.com/repos/huggingface/datasets/issues/7428
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7428/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7428/comments
https://api.github.com/repos/huggingface/datasets/issues/7428/events
https://github.com/huggingface/datasets/pull/7428
2,886,111,651
PR_kwDODunzps6M4kW8
7,428
Use pyupgrade --py39-plus
{ "avatar_url": "https://avatars.githubusercontent.com/u/17618148?v=4", "events_url": "https://api.github.com/users/cyyever/events{/privacy}", "followers_url": "https://api.github.com/users/cyyever/followers", "following_url": "https://api.github.com/users/cyyever/following{/other_user}", "gists_url": "https://api.github.com/users/cyyever/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cyyever", "id": 17618148, "login": "cyyever", "node_id": "MDQ6VXNlcjE3NjE4MTQ4", "organizations_url": "https://api.github.com/users/cyyever/orgs", "received_events_url": "https://api.github.com/users/cyyever/received_events", "repos_url": "https://api.github.com/users/cyyever/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cyyever/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cyyever/subscriptions", "type": "User", "url": "https://api.github.com/users/cyyever", "user_view_type": "public" }
[]
closed
false
null
[]
null
3
2025-02-28T03:39:44Z
2025-03-22T00:51:20Z
2025-03-05T15:04:16Z
CONTRIBUTOR
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7428/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7428/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7428.diff", "html_url": "https://github.com/huggingface/datasets/pull/7428", "merged_at": "2025-03-05T15:04:16Z", "patch_url": "https://github.com/huggingface/datasets/pull/7428.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7428" }
https://api.github.com/repos/huggingface/datasets/issues/7427
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7427/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7427/comments
https://api.github.com/repos/huggingface/datasets/issues/7427/events
https://github.com/huggingface/datasets/issues/7427
2,886,032,571
I_kwDODunzps6sBVy7
7,427
Error splitting the input into NAL units.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47114466?v=4", "events_url": "https://api.github.com/users/MengHao666/events{/privacy}", "followers_url": "https://api.github.com/users/MengHao666/followers", "following_url": "https://api.github.com/users/MengHao666/following{/other_user}", "gists_url": "https://api.github.com/users/MengHao666/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/MengHao666", "id": 47114466, "login": "MengHao666", "node_id": "MDQ6VXNlcjQ3MTE0NDY2", "organizations_url": "https://api.github.com/users/MengHao666/orgs", "received_events_url": "https://api.github.com/users/MengHao666/received_events", "repos_url": "https://api.github.com/users/MengHao666/repos", "site_admin": false, "starred_url": "https://api.github.com/users/MengHao666/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MengHao666/subscriptions", "type": "User", "url": "https://api.github.com/users/MengHao666", "user_view_type": "public" }
[]
open
false
null
[]
null
2
2025-02-28T02:30:15Z
2025-03-04T01:40:28Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I am trying to finetune qwen2.5-vl on 16 * 80G GPUS, and I use `LLaMA-Factory` and set `preprocessing_num_workers=16`. However, I met the following error and the program seem to got crush. It seems that the error come from `datasets` library The error logging is like following: ```text Converting format of dataset (num_proc=16): 100%|█████████▉| 19265/19267 [11:44<00:00, 5.88 examples/s] Converting format of dataset (num_proc=16): 100%|█████████▉| 19266/19267 [11:44<00:00, 5.02 examples/s] Converting format of dataset (num_proc=16): 100%|██████████| 19267/19267 [11:44<00:00, 5.44 examples/s] Converting format of dataset (num_proc=16): 100%|██████████| 19267/19267 [11:44<00:00, 27.34 examples/s] Running tokenizer on dataset (num_proc=16): 0%| | 0/19267 [00:00<?, ? examples/s] Invalid NAL unit size (45405 > 35540). Invalid NAL unit size (86720 > 54856). Invalid NAL unit size (7131 > 3225). missing picture in access unit with size 54860 Invalid NAL unit size (48042 > 33645). missing picture in access unit with size 3229 missing picture in access unit with size 33649 Invalid NAL unit size (86720 > 54856). Invalid NAL unit size (48042 > 33645). Error splitting the input into NAL units. missing picture in access unit with size 35544 Invalid NAL unit size (45405 > 35540). Error splitting the input into NAL units. Error splitting the input into NAL units. Invalid NAL unit size (8187 > 7069). missing picture in access unit with size 7073 Invalid NAL unit size (8187 > 7069). Error splitting the input into NAL units. Invalid NAL unit size (7131 > 3225). Error splitting the input into NAL units. Invalid NAL unit size (14013 > 5998). missing picture in access unit with size 6002 Invalid NAL unit size (14013 > 5998). Error splitting the input into NAL units. Invalid NAL unit size (17173 > 7231). missing picture in access unit with size 7235 Invalid NAL unit size (17173 > 7231). Error splitting the input into NAL units. Invalid NAL unit size (16964 > 6055). missing picture in access unit with size 6059 Invalid NAL unit size (16964 > 6055). Exception in thread Thread-9 (accepter)Error splitting the input into NAL units. : Traceback (most recent call last): File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 1016, in _bootstrap_inner Running tokenizer on dataset (num_proc=16): 0%| | 0/19267 [13:22<?, ? examples/s] self.run() File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 953, in run Invalid NAL unit size (7032 > 2927). missing picture in access unit with size 2931 self._target(*self._args, **self._kwargs) File "/opt/conda/envs/python3.10.13/lib/python3.10/site-packages/multiprocess/managers.py", line 194, in accepter Invalid NAL unit size (7032 > 2927). Error splitting the input into NAL units. t.start() File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 935, in start Invalid NAL unit size (28973 > 6121). missing picture in access unit with size 6125 _start_new_thread(self._bootstrap, ())Invalid NAL unit size (28973 > 6121). RuntimeError: can't start new threadError splitting the input into NAL units. Invalid NAL unit size (4411 > 296). missing picture in access unit with size 300 Invalid NAL unit size (4411 > 296). Error splitting the input into NAL units. Invalid NAL unit size (14414 > 1471). missing picture in access unit with size 1475 Invalid NAL unit size (14414 > 1471). Error splitting the input into NAL units. Invalid NAL unit size (5283 > 1792). 
missing picture in access unit with size 1796 Invalid NAL unit size (5283 > 1792). Error splitting the input into NAL units. Invalid NAL unit size (79147 > 10042). missing picture in access unit with size 10046 Invalid NAL unit size (79147 > 10042). Error splitting the input into NAL units. Invalid NAL unit size (45405 > 35540). Invalid NAL unit size (86720 > 54856). Invalid NAL unit size (7131 > 3225). missing picture in access unit with size 54860 Invalid NAL unit size (48042 > 33645). missing picture in access unit with size 3229 missing picture in access unit with size 33649 Invalid NAL unit size (86720 > 54856). Invalid NAL unit size (48042 > 33645). Error splitting the input into NAL units. missing picture in access unit with size 35544 Invalid NAL unit size (45405 > 35540). Error splitting the input into NAL units. Error splitting the input into NAL units. Invalid NAL unit size (8187 > 7069). missing picture in access unit with size 7073 Invalid NAL unit size (8187 > 7069). Error splitting the input into NAL units. Invalid NAL unit size (7131 > 3225). Error splitting the input into NAL units. Invalid NAL unit size (14013 > 5998). missing picture in access unit with size 6002 Invalid NAL unit size (14013 > 5998). Error splitting the input into NAL units. Invalid NAL unit size (17173 > 7231). missing picture in access unit with size 7235 Invalid NAL unit size (17173 > 7231). Error splitting the input into NAL units. Invalid NAL unit size (16964 > 6055). missing picture in access unit with size 6059 Invalid NAL unit size (16964 > 6055). Exception in thread Thread-9 (accepter)Error splitting the input into NAL units. : Traceback (most recent call last): File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 1016, in _bootstrap_inner Running tokenizer on dataset (num_proc=16): 0%| | 0/19267 [13:22<?, ? examples/s] self.run() File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 953, in run Invalid NAL unit size (7032 > 2927). missing picture in access unit with size 2931 self._target(*self._args, **self._kwargs) File "/opt/conda/envs/python3.10.13/lib/python3.10/site-packages/multiprocess/managers.py", line 194, in accepter Invalid NAL unit size (7032 > 2927). Error splitting the input into NAL units. t.start() File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 935, in start Invalid NAL unit size (28973 > 6121). missing picture in access unit with size 6125 _start_new_thread(self._bootstrap, ())Invalid NAL unit size (28973 > 6121). RuntimeError: can't start new threadError splitting the input into NAL units. Invalid NAL unit size (4411 > 296). missing picture in access unit with size 300 Invalid NAL unit size (4411 > 296). Error splitting the input into NAL units. Invalid NAL unit size (14414 > 1471). missing picture in access unit with size 1475 Invalid NAL unit size (14414 > 1471). Error splitting the input into NAL units. Invalid NAL unit size (5283 > 1792). missing picture in access unit with size 1796 Invalid NAL unit size (5283 > 1792). Error splitting the input into NAL units. Invalid NAL unit size (79147 > 10042). missing picture in access unit with size 10046 Invalid NAL unit size (79147 > 10042). Error splitting the input into NAL units. Invalid NAL unit size (45405 > 35540). Invalid NAL unit size (86720 > 54856). Invalid NAL unit size (7131 > 3225). missing picture in access unit with size 54860 Invalid NAL unit size (48042 > 33645). 
missing picture in access unit with size 3229 missing picture in access unit with size 33649 Invalid NAL unit size (86720 > 54856). Invalid NAL unit size (48042 > 33645). Error splitting the input into NAL units. missing picture in access unit with size 35544 Invalid NAL unit size (45405 > 35540). Error splitting the input into NAL units. Error splitting the input into NAL units. Invalid NAL unit size (8187 > 7069). missing picture in access unit with size 7073 Invalid NAL unit size (8187 > 7069). Error splitting the input into NAL units. Invalid NAL unit size (7131 > 3225). Error splitting the input into NAL units. Invalid NAL unit size (14013 > 5998). missing picture in access unit with size 6002 Invalid NAL unit size (14013 > 5998). Error splitting the input into NAL units. Invalid NAL unit size (17173 > 7231). missing picture in access unit with size 7235 Invalid NAL unit size (17173 > 7231). Error splitting the input into NAL units. Invalid NAL unit size (16964 > 6055). missing picture in access unit with size 6059 Invalid NAL unit size (16964 > 6055). Exception in thread Thread-9 (accepter)Error splitting the input into NAL units. : Traceback (most recent call last): File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 1016, in _bootstrap_inner Running tokenizer on dataset (num_proc=16): 0%| | 0/19267 [13:22<?, ? examples/s] self.run() File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 953, in run Invalid NAL unit size (7032 > 2927). missing picture in access unit with size 2931 self._target(*self._args, **self._kwargs) File "/opt/conda/envs/python3.10.13/lib/python3.10/site-packages/multiprocess/managers.py", line 194, in accepter Invalid NAL unit size (7032 > 2927). Error splitting the input into NAL units. t.start() File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 935, in start Invalid NAL unit size (28973 > 6121). missing picture in access unit with size 6125 _start_new_thread(self._bootstrap, ())Invalid NAL unit size (28973 > 6121). RuntimeError: can't start new threadError splitting the input into NAL units. Invalid NAL unit size (4411 > 296). missing picture in access unit with size 300 Invalid NAL unit size (4411 > 296). Error splitting the input into NAL units. Invalid NAL unit size (14414 > 1471). missing picture in access unit with size 1475 Invalid NAL unit size (14414 > 1471). Error splitting the input into NAL units. Invalid NAL unit size (5283 > 1792). missing picture in access unit with size 1796 Invalid NAL unit size (5283 > 1792). Error splitting the input into NAL units. Invalid NAL unit size (79147 > 10042). missing picture in access unit with size 10046 Invalid NAL unit size (79147 > 10042). Error splitting the input into NAL units. Invalid NAL unit size (45405 > 35540). Invalid NAL unit size (86720 > 54856). Invalid NAL unit size (7131 > 3225). missing picture in access unit with size 54860 Invalid NAL unit size (48042 > 33645). missing picture in access unit with size 3229 missing picture in access unit with size 33649 Invalid NAL unit size (86720 > 54856). Invalid NAL unit size (48042 > 33645). Error splitting the input into NAL units. missing picture in access unit with size 35544 Invalid NAL unit size (45405 > 35540). Error splitting the input into NAL units. Error splitting the input into NAL units. Invalid NAL unit size (8187 > 7069). missing picture in access unit with size 7073 Invalid NAL unit size (8187 > 7069). Error splitting the input into NAL units. Invalid NAL unit size (7131 > 3225). 
Error splitting the input into NAL units. Invalid NAL unit size (14013 > 5998). missing picture in access unit with size 6002 Invalid NAL unit size (14013 > 5998). Error splitting the input into NAL units. Invalid NAL unit size (17173 > 7231). missing picture in access unit with size 7235 Invalid NAL unit size (17173 > 7231). Error splitting the input into NAL units. Invalid NAL unit size (16964 > 6055). missing picture in access unit with size 6059 Invalid NAL unit size (16964 > 6055). Exception in thread Thread-9 (accepter)Error splitting the input into NAL units. : Traceback (most recent call last): File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 1016, in _bootstrap_inner Running tokenizer on dataset (num_proc=16): 0%| | 0/19267 [13:22<?, ? examples/s] self.run() File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 953, in run Invalid NAL unit size (7032 > 2927). missing picture in access unit with size 2931 self._target(*self._args, **self._kwargs) File "/opt/conda/envs/python3.10.13/lib/python3.10/site-packages/multiprocess/managers.py", line 194, in accepter Invalid NAL unit size (7032 > 2927). Error splitting the input into NAL units. t.start() File "/opt/conda/envs/python3.10.13/lib/python3.10/threading.py", line 935, in start Invalid NAL unit size (28973 > 6121). missing picture in access unit with size 6125 _start_new_thread(self._bootstrap, ())Invalid NAL unit size (28973 > 6121). RuntimeError: can't start new threadError splitting the input into NAL units. Invalid NAL unit size (4411 > 296). missing picture in access unit with size 300 Invalid NAL unit size (4411 > 296). Error splitting the input into NAL units. Invalid NAL unit size (14414 > 1471). missing picture in access unit with size 1475 Invalid NAL unit size (14414 > 1471). Error splitting the input into NAL units. Invalid NAL unit size (5283 > 1792). missing picture in access unit with size 1796 Invalid NAL unit size (5283 > 1792). Error splitting the input into NAL units. Invalid NAL unit size (79147 > 10042). missing picture in access unit with size 10046 Invalid NAL unit size (79147 > 10042). Error splitting the input into NAL units. ``` ### Others _No response_ ### Steps to reproduce the bug None ### Expected behavior excpect to run successfully ### Environment info ``` transformers==4.49.0 datasets==3.2.0 accelerate==1.2.1 peft==0.12.0 trl==0.9.6 tokenizers==0.21.0 gradio>=4.38.0,<=5.18.0 pandas>=2.0.0 scipy einops sentencepiece tiktoken protobuf uvicorn pydantic fastapi sse-starlette matplotlib>=3.7.0 fire packaging pyyaml numpy<2.0.0 av librosa tyro<0.9.0 openlm-hub qwen-vl-utils ```
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7427/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7427/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7426
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7426/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7426/comments
https://api.github.com/repos/huggingface/datasets/issues/7426/events
https://github.com/huggingface/datasets/pull/7426
2,883,754,507
PR_kwDODunzps6Mwe6B
7,426
fix: None default with bool type on load creates typing error
{ "avatar_url": "https://avatars.githubusercontent.com/u/8882233?v=4", "events_url": "https://api.github.com/users/stephantul/events{/privacy}", "followers_url": "https://api.github.com/users/stephantul/followers", "following_url": "https://api.github.com/users/stephantul/following{/other_user}", "gists_url": "https://api.github.com/users/stephantul/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stephantul", "id": 8882233, "login": "stephantul", "node_id": "MDQ6VXNlcjg4ODIyMzM=", "organizations_url": "https://api.github.com/users/stephantul/orgs", "received_events_url": "https://api.github.com/users/stephantul/received_events", "repos_url": "https://api.github.com/users/stephantul/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stephantul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stephantul/subscriptions", "type": "User", "url": "https://api.github.com/users/stephantul", "user_view_type": "public" }
[]
closed
false
null
[]
null
0
2025-02-27T08:11:36Z
2025-03-04T15:53:40Z
2025-03-04T15:53:40Z
CONTRIBUTOR
null
null
null
Hello! Pyright flags any use of `load_dataset` as an error, because the default for `trust_remote_code` is `None`, but the parameter is typed as `bool`, not `Optional[bool]`. I changed the type and docstrings to reflect this, but no other code was touched.
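A minimal sketch of the annotation change described (the signature is heavily abridged; only the relevant parameter is shown):

```python
from typing import Optional

# Before: trust_remote_code: bool = None   -> flagged by strict type checkers
# After:  the None default is reflected in the annotation
def load_dataset(path: str, *, trust_remote_code: Optional[bool] = None, **kwargs):
    ...
```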
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7426/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7426/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7426.diff", "html_url": "https://github.com/huggingface/datasets/pull/7426", "merged_at": "2025-03-04T15:53:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/7426.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7426" }
https://api.github.com/repos/huggingface/datasets/issues/7425
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7425/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7425/comments
https://api.github.com/repos/huggingface/datasets/issues/7425/events
https://github.com/huggingface/datasets/issues/7425
2,883,684,686
I_kwDODunzps6r4YlO
7,425
load_dataset("livecodebench/code_generation_lite", version_tag="release_v2") TypeError: 'NoneType' object is not callable
{ "avatar_url": "https://avatars.githubusercontent.com/u/42167236?v=4", "events_url": "https://api.github.com/users/dshwei/events{/privacy}", "followers_url": "https://api.github.com/users/dshwei/followers", "following_url": "https://api.github.com/users/dshwei/following{/other_user}", "gists_url": "https://api.github.com/users/dshwei/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dshwei", "id": 42167236, "login": "dshwei", "node_id": "MDQ6VXNlcjQyMTY3MjM2", "organizations_url": "https://api.github.com/users/dshwei/orgs", "received_events_url": "https://api.github.com/users/dshwei/received_events", "repos_url": "https://api.github.com/users/dshwei/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dshwei/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dshwei/subscriptions", "type": "User", "url": "https://api.github.com/users/dshwei", "user_view_type": "public" }
[]
open
false
null
[]
null
10
2025-02-27T07:36:02Z
2025-03-27T05:05:33Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug from datasets import load_dataset lcb_codegen = load_dataset("livecodebench/code_generation_lite", version_tag="release_v2") or configs = get_dataset_config_names("livecodebench/code_generation_lite", trust_remote_code=True) both raise the same error: Traceback (most recent call last): File "", line 1, in File "/workspace/miniconda/envs/grpo/lib/python3.10/site-packages/datasets/load.py", line 2131, in load_dataset builder_instance = load_dataset_builder( File "/workspace/miniconda/envs/grpo/lib/python3.10/site-packages/datasets/load.py", line 1888, in load_dataset_builder builder_instance: DatasetBuilder = builder_cls( TypeError: 'NoneType' object is not callable ### Steps to reproduce the bug from datasets import get_dataset_config_names configs = get_dataset_config_names("livecodebench/code_generation_lite", trust_remote_code=True) OR lcb_codegen = load_dataset("livecodebench/code_generation_lite", version_tag="release_v2") ### Expected behavior The dataset livecodebench/code_generation_lite loads successfully. ### Environment info datasets version '3.3.2'
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7425/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7425/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7424
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7424/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7424/comments
https://api.github.com/repos/huggingface/datasets/issues/7424/events
https://github.com/huggingface/datasets/pull/7424
2,882,663,621
PR_kwDODunzps6Ms1Qx
7,424
Faster folder based builder + parquet support + allow repeated media + use torchvision
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-26T19:55:18Z
2025-03-05T18:51:00Z
2025-03-05T17:41:23Z
MEMBER
null
null
null
This will be useful for LeRobotDataset (robotics datasets for [lerobot](https://github.com/huggingface/lerobot) based on videos) Impacted builders: - ImageFolder - AudioFolder - VideoFolder Improvements: - faster to stream (got a 5x speed up on an image dataset) - improved RAM usage - support for metadata.parquet - allow to link to an image/audio/video multiple times - support for pyarrow filters (mostly efficient for parquet) - link to files using fields names `*_file_name` (in addition to the already existing `file_name`) - this allows to have multiple image/audio/video per row - there is also `file_names` and `*_file_names` for lists of image/audio/video Changes: - the builders iterate on the metadata files instead of the media files - the builders iterate on chunks of metadata instead of loading them in RAM completely - metadata files are no longer handled separately in `data_files` - added the `filters` argument to pass to `load_dataset` - either as an [Expression](https://arrow.apache.org/docs/python/generated/pyarrow.dataset.Expression.html) - or as tuples like `filters=[('event_name', '=', 'SomeEvent')]` - small breaking change: you can't add labels to a dataset with`drop_labels=False` if it has a metadata file - small breaking change: you can't use one metadata file for multiple splits anymore Example: `lhoestq/pusht-videofolder` is a video dataset with metadata.parquet where multiple rows can point to the same video ```python In [1]: from datasets import load_dataset In [2]: load_dataset("lhoestq/pusht-videofolder") Resolving data files: 100%|██████████████████████████████| 207/207 [00:00<00:00, 1087.32it/s] Out[2]: DatasetDict({ train: Dataset({ features: ['video', 'observation.state', 'action', 'episode_index', 'frame_index', 'timestamp', 'next.reward', 'next.done', 'next.success', 'index', 'task_index'], num_rows: 25650 }) }) In [3]: load_dataset("lhoestq/pusht-videofolder", filters=[("next.reward", ">", 0.5)]) Resolving data files: 100%|██████████████████████████████| 207/207 [00:01<00:00, 183.03it/s] Out[3]: DatasetDict({ train: Dataset({ features: ['video', 'observation.state', 'action', 'episode_index', 'frame_index', 'timestamp', 'next.reward', 'next.done', 'next.success', 'index', 'task_index'], num_rows: 5773 }) }) ``` Additional change for VideoFolder: - decord can't be installed in many setups, I switched the backend to torchvision instead - I also added streaming capability from HF (you can get video frames without downloading the full video from HF) Example: load a robotics dataset ```python In [1]: from datasets import load_dataset ds In [2]: ds = load_dataset("lhoestq/pusht-videofolder") Resolving data files: 100%|██████████████████████████████| 207/207 [00:00<00:00, 624.81it/s] In [3]: ds["train"][0] Out[3]: {'video': <torchvision.io.video_reader.VideoReader at 0x1145dc290>, 'observation.state': [222.0, 97.0], 'action': [233.0, 71.0], 'episode_index': 0, 'frame_index': 0, 'timestamp': 0.0, 'next.reward': 0.19029748439788818, 'next.done': False, 'next.success': False, 'index': 0, 'task_index': 0} ``` Example: stream frames without downloading full videos ```python In [1]: from datasets import load_dataset In [2]: ds = load_dataset("BrianGuo/Tennis_Data", streaming=True) In [3]: example = next(iter(ds["train"])) In [4]: video = example["video"] In [5]: video.get_metadata() Out[5]: {'audio': {'framerate': [44100.0], 'duration': [2027.35]}, 'video': {'fps': [59.00002712894387], 'duration': [2027.355]}} In [6]: video.seek(1800, keyframes_only=True) # 30min Out[6]: 
<torchvision.io.video_reader.VideoReader at 0x148d4d010> In [7]: next(video) Out[7]: {'data': tensor([[[ 76, 77, 79, ..., 41, 39, 38], [ 76, 77, 79, ..., 40, 39, 35], [ 76, 77, 79, ..., 34, 30, 26], ..., [127, 127, 127, ..., 125, 125, 125], [125, 126, 126, ..., 125, 125, 125], [122, 124, 126, ..., 125, 125, 125]]], dtype=torch.uint8), 'pts': 1800.0} ``` TODO: - [x] docs - [x] fix tests
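For completeness, a small sketch of the `filters` argument using a pyarrow `Expression` instead of the tuple form shown above. This assumes the expression is forwarded to the parquet scan as described in this PR:

```python
import pyarrow.dataset as ds
from datasets import load_dataset

# Equivalent to filters=[("next.reward", ">", 0.5)], written as a pyarrow Expression.
expr = ds.field("next.reward") > 0.5
dataset = load_dataset("lhoestq/pusht-videofolder", filters=expr)
```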
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 2, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/7424/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7424/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7424.diff", "html_url": "https://github.com/huggingface/datasets/pull/7424", "merged_at": "2025-03-05T17:41:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/7424.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7424" }
https://api.github.com/repos/huggingface/datasets/issues/7423
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7423/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7423/comments
https://api.github.com/repos/huggingface/datasets/issues/7423/events
https://github.com/huggingface/datasets/issues/7423
2,879,271,409
I_kwDODunzps6rnjHx
7,423
Row indexing a dataset with numpy integers
{ "avatar_url": "https://avatars.githubusercontent.com/u/35470740?v=4", "events_url": "https://api.github.com/users/DavidRConnell/events{/privacy}", "followers_url": "https://api.github.com/users/DavidRConnell/followers", "following_url": "https://api.github.com/users/DavidRConnell/following{/other_user}", "gists_url": "https://api.github.com/users/DavidRConnell/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/DavidRConnell", "id": 35470740, "login": "DavidRConnell", "node_id": "MDQ6VXNlcjM1NDcwNzQw", "organizations_url": "https://api.github.com/users/DavidRConnell/orgs", "received_events_url": "https://api.github.com/users/DavidRConnell/received_events", "repos_url": "https://api.github.com/users/DavidRConnell/repos", "site_admin": false, "starred_url": "https://api.github.com/users/DavidRConnell/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DavidRConnell/subscriptions", "type": "User", "url": "https://api.github.com/users/DavidRConnell", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
1
2025-02-25T18:44:45Z
2025-03-03T17:55:24Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request Allow indexing datasets with a scalar numpy integer type. ### Motivation Indexing a dataset with a scalar numpy.int* object raises a TypeError. This is due to the test in `datasets/formatting/formatting.py:key_to_query_type` ``` python def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str: if isinstance(key, int): return "row" elif isinstance(key, str): return "column" elif isinstance(key, (slice, range, Iterable)): return "batch" _raise_bad_key_type(key) ``` In the row case, it checks if key is an int, which returns false when key is integer like but not a builtin python integer type. This is counterintuitive because a numpy array of np.int64s can be used for the batch case. For example: ``` python import numpy as np import datasets dataset = datasets.Dataset.from_dict({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}) # Regular indexing dataset[0] dataset[:2] # Indexing with numpy data types (expect same results) idx = np.asarray([0, 1]) dataset[idx] # Succeeds when using an array of np.int64 values dataset[idx[0]] # Fails with TypeError when using scalar np.int64 ``` For the user, this can be solved by wrapping `idx[0]` in `int` but the test could also be changed in `key_to_query_type` to accept a less strict definition of int. ``` diff +import numbers + def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str: + if isinstance(key, numbers.Integral): - if isinstance(key, int): return "row" elif isinstance(key, str): return "column" elif isinstance(key, (slice, range, Iterable)): return "batch" _raise_bad_key_type(key) ``` Looking at how others do it, pandas has an `is_integer` definition that it checks which uses `is_integer_object` defined in `pandas/_libs/utils.pxd`: ``` cython cdef inline bint is_integer_object(object obj) noexcept: """ Cython equivalent of `isinstance(val, (int, np.integer)) and not isinstance(val, (bool, np.timedelta64))` Parameters ---------- val : object Returns ------- is_integer : bool Notes ----- This counts np.timedelta64 objects as integers. """ return (not PyBool_Check(obj) and isinstance(obj, (int, cnp.integer)) and not is_timedelta64_object(obj)) ``` This would be less flexible as it explicitly checks for numpy integer, but worth noting that they had the need to ensure the key is not a bool. ### Your contribution I can submit a pull request with the above changes after checking that indexing succeeds with the numpy integer type. Or if there is a different integer check that would be preferred I could add that. If there is a reason not to want this behavior that is fine too.
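A minimal sketch of the user-side workaround mentioned above (casting the numpy scalar to a builtin `int`), which works today without any change to `key_to_query_type`:

```python
import numpy as np
import datasets

dataset = datasets.Dataset.from_dict({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
idx = np.asarray([0, 1])

dataset[idx]           # batch indexing with an np.int64 array already works
dataset[int(idx[0])]   # wrapping the np.int64 scalar in int() avoids the TypeError
```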
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7423/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7423/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7421
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7421/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7421/comments
https://api.github.com/repos/huggingface/datasets/issues/7421/events
https://github.com/huggingface/datasets/issues/7421
2,878,369,052
I_kwDODunzps6rkG0c
7,421
DVC integration broken
{ "avatar_url": "https://avatars.githubusercontent.com/u/34747372?v=4", "events_url": "https://api.github.com/users/maxstrobel/events{/privacy}", "followers_url": "https://api.github.com/users/maxstrobel/followers", "following_url": "https://api.github.com/users/maxstrobel/following{/other_user}", "gists_url": "https://api.github.com/users/maxstrobel/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/maxstrobel", "id": 34747372, "login": "maxstrobel", "node_id": "MDQ6VXNlcjM0NzQ3Mzcy", "organizations_url": "https://api.github.com/users/maxstrobel/orgs", "received_events_url": "https://api.github.com/users/maxstrobel/received_events", "repos_url": "https://api.github.com/users/maxstrobel/repos", "site_admin": false, "starred_url": "https://api.github.com/users/maxstrobel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/maxstrobel/subscriptions", "type": "User", "url": "https://api.github.com/users/maxstrobel", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-02-25T13:14:31Z
2025-03-03T17:42:02Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug The DVC integration seems to be broken. Followed this guide: https://dvc.org/doc/user-guide/integrations/huggingface ### Steps to reproduce the bug #### Script to reproduce ~~~python from datasets import load_dataset dataset = load_dataset( "csv", data_files="dvc://workshop/satellite-data/jan_train.csv", storage_options={"url": "https://github.com/iterative/dataset-registry.git"}, ) print(dataset) ~~~ #### Error log ~~~ Traceback (most recent call last): File "C:\tmp\test\load.py", line 3, in <module> dataset = load_dataset( ^^^^^^^^^^^^^ File "C:\tmp\test\.venv\Lib\site-packages\datasets\load.py", line 2151, in load_dataset builder_instance.download_and_prepare( File "C:\tmp\test\.venv\Lib\site-packages\datasets\builder.py", line 808, in download_and_prepare fs, output_dir = url_to_fs(output_dir, **(storage_options or {})) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ TypeError: url_to_fs() got multiple values for argument 'url' ~~~ ### Expected behavior Integration would work and the indicated file is downloaded and opened. ### Environment info #### Python version ~~~ python --version Python 3.11.10 ~~~ #### Venv (pip install datasets dvc): ~~~ Package Version ---------------------- ----------- aiohappyeyeballs 2.4.6 aiohttp 3.11.13 aiohttp-retry 2.9.1 aiosignal 1.3.2 amqp 5.3.1 annotated-types 0.7.0 antlr4-python3-runtime 4.9.3 appdirs 1.4.4 asyncssh 2.20.0 atpublic 5.1 attrs 25.1.0 billiard 4.2.1 celery 5.4.0 certifi 2025.1.31 cffi 1.17.1 charset-normalizer 3.4.1 click 8.1.8 click-didyoumean 0.3.1 click-plugins 1.1.1 click-repl 0.3.0 colorama 0.4.6 configobj 5.0.9 cryptography 44.0.1 datasets 3.3.2 dictdiffer 0.9.0 dill 0.3.8 diskcache 5.6.3 distro 1.9.0 dpath 2.2.0 dulwich 0.22.7 dvc 3.59.1 dvc-data 3.16.9 dvc-http 2.32.0 dvc-objects 5.1.0 dvc-render 1.0.2 dvc-studio-client 0.21.0 dvc-task 0.40.2 entrypoints 0.4 filelock 3.17.0 flatten-dict 0.4.2 flufl-lock 8.1.0 frozenlist 1.5.0 fsspec 2024.12.0 funcy 2.0 gitdb 4.0.12 gitpython 3.1.44 grandalf 0.8 gto 1.7.2 huggingface-hub 0.29.1 hydra-core 1.3.2 idna 3.10 iterative-telemetry 0.0.10 kombu 5.4.2 markdown-it-py 3.0.0 mdurl 0.1.2 multidict 6.1.0 multiprocess 0.70.16 networkx 3.4.2 numpy 2.2.3 omegaconf 2.3.0 orjson 3.10.15 packaging 24.2 pandas 2.2.3 pathspec 0.12.1 platformdirs 4.3.6 prompt-toolkit 3.0.50 propcache 0.3.0 psutil 7.0.0 pyarrow 19.0.1 pycparser 2.22 pydantic 2.10.6 pydantic-core 2.27.2 pydot 3.0.4 pygit2 1.17.0 pygments 2.19.1 pygtrie 2.5.0 pyparsing 3.2.1 python-dateutil 2.9.0.post0 pytz 2025.1 pywin32 308 pyyaml 6.0.2 requests 2.32.3 rich 13.9.4 ruamel-yaml 0.18.10 ruamel-yaml-clib 0.2.12 scmrepo 3.3.10 semver 3.0.4 setuptools 75.8.0 shellingham 1.5.4 shortuuid 1.0.13 shtab 1.7.1 six 1.17.0 smmap 5.0.2 sqltrie 0.11.2 tabulate 0.9.0 tomlkit 0.13.2 tqdm 4.67.1 typer 0.15.1 typing-extensions 4.12.2 tzdata 2025.1 urllib3 2.3.0 vine 5.1.0 voluptuous 0.15.2 wcwidth 0.2.13 xxhash 3.5.0 yarl 1.18.3 zc-lockfile 3.0.post1 ~~~
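A possible interim workaround, while the `storage_options`/`url_to_fs` clash above is unresolved, is to read the tracked file through `DVCFileSystem` and build the dataset from pandas. This is only a sketch under the assumption that the DVC Python API is installed alongside `datasets`:

```python
import pandas as pd
from datasets import Dataset
from dvc.api import DVCFileSystem

# Open the DVC-tracked CSV directly instead of going through load_dataset's storage_options.
fs = DVCFileSystem("https://github.com/iterative/dataset-registry.git")
with fs.open("workshop/satellite-data/jan_train.csv") as f:
    df = pd.read_csv(f)

dataset = Dataset.from_pandas(df)
```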
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7421/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7421/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7420
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7420/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7420/comments
https://api.github.com/repos/huggingface/datasets/issues/7420/events
https://github.com/huggingface/datasets/issues/7420
2,876,281,928
I_kwDODunzps6rcJRI
7,420
better correspondence between cached and saved datasets created using from_generator
{ "avatar_url": "https://avatars.githubusercontent.com/u/12157034?v=4", "events_url": "https://api.github.com/users/vttrifonov/events{/privacy}", "followers_url": "https://api.github.com/users/vttrifonov/followers", "following_url": "https://api.github.com/users/vttrifonov/following{/other_user}", "gists_url": "https://api.github.com/users/vttrifonov/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/vttrifonov", "id": 12157034, "login": "vttrifonov", "node_id": "MDQ6VXNlcjEyMTU3MDM0", "organizations_url": "https://api.github.com/users/vttrifonov/orgs", "received_events_url": "https://api.github.com/users/vttrifonov/received_events", "repos_url": "https://api.github.com/users/vttrifonov/repos", "site_admin": false, "starred_url": "https://api.github.com/users/vttrifonov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vttrifonov/subscriptions", "type": "User", "url": "https://api.github.com/users/vttrifonov", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
0
2025-02-24T22:14:37Z
2025-02-26T03:10:22Z
null
CONTRIBUTOR
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request At the moment `.from_generator` can only create a dataset that lives in the cache. The cached dataset cannot be loaded with `load_from_disk` because the cache folder is missing `state.json`. So the only way to convert this cached dataset to a regular `Dataset` is to use `save_to_disk`, which needs to create a copy of the cached dataset. For large datasets this can end up wasting a lot of space. In my case the saving operation failed, so I am stuck with a large cached dataset and no clear way to convert it to a `Dataset` that I can use. The requested feature is to provide a way to load a cached dataset using `.load_from_disk`. Alternatively, `.from_generator` could create the dataset at a specified location so that it can be loaded from there with `.load_from_disk`. ### Motivation I have the following workflow, which has exposed some awkwardness in the Datasets saving/caching. 1. I created a cached dataset using `.from_generator`, which was cached in a folder. This dataset is rather large (~600GB) with many shards. 2. I tried to save this dataset using `.save_to_disk` to another location so that I can use it later as a `Dataset`. This essentially creates another copy (for a total of 1.2TB!) of what is already in the cache... In my case the saving operation keeps dying for some reason, and I am stuck with a cached dataset and no copy. 3. Now I am trying to "save" the existing cached dataset, but it is not clear how to access the cached files after `.from_generator` has finished, e.g. from a different process. I should not even be looking at the cache, but I really do not want to waste another 2 hours regenerating the set only for it to fail again (I have already done this a couple of times). - I tried `.load_from_disk`, but it does not work with cached files and complains that this is not a `Dataset` (!). - I looked at `.from_file`, which takes one file, but the cache has many files (shards), so I am not sure how to make this work. - I tried `.load_dataset`, but this seems to either try to "download" a copy (of a file which is already in the local file system!) which I will then need to save, or I need to use `streaming=True` to create an `IterableDataset` which I then need to convert (using the cache) to a `Dataset` so that I can save it. With both options I will end up with 3 copies of the same dataset for a total of ~2TB! I am hoping there is another way to do this... Maybe I am missing something here: I looked at the docs and forums but no luck. I have a bunch of arrow files cached by `Dataset.from_generator` and no clean way to make them into a `Dataset` that I can use. This all could be so much easier if `load_from_disk` could recognize the cached files and produce a `Dataset`: after the cache is created I would not have to "save" it again, and I could just load it when I need it. At the moment `load_from_disk` needs `state.json`, which is missing from the cache folder. So perhaps `.from_generator` could be made to "finalize" (e.g. create `state.json`) the dataset once it is done so that it can be loaded easily. Or provide `.from_generator` with a `save_to_dir` parameter in addition to `cache_dir`, which can be used for the whole process including creating the `state.json` at the end. As a proof of concept I just created `state.json` by hand and `load_from_disk` worked using the cache! So it seems to be the missing piece here. ### Your contribution Time permitting, I can look into `.from_generator` to see if adding `state.json` is feasible.
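A rough sketch of the proof of concept described above: writing a minimal `state.json` next to the cached arrow shards so that `load_from_disk` accepts the folder. The set of keys below is an assumption modeled on what `save_to_disk` normally writes (the cache may also need a valid `dataset_info.json`), so treat it as a starting point rather than a guaranteed recipe:

```python
import glob
import json
import os

cache_dir = "/path/to/from_generator/cache"  # hypothetical location of the cached .arrow shards

shards = sorted(glob.glob(os.path.join(cache_dir, "*.arrow")))
state = {
    "_data_files": [{"filename": os.path.basename(p)} for p in shards],
    "_fingerprint": "manually-created",  # placeholder; save_to_disk normally computes this hash
    "_format_columns": None,
    "_format_kwargs": {},
    "_format_type": None,
    "_output_all_columns": False,
    "_split": None,
}
with open(os.path.join(cache_dir, "state.json"), "w") as f:
    json.dump(state, f, indent=2)
```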
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7420/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7420/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7419
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7419/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7419/comments
https://api.github.com/repos/huggingface/datasets/issues/7419/events
https://github.com/huggingface/datasets/issues/7419
2,875,635,320
I_kwDODunzps6rZrZ4
7,419
Import order crashes script execution
{ "avatar_url": "https://avatars.githubusercontent.com/u/23298479?v=4", "events_url": "https://api.github.com/users/DamienMatias/events{/privacy}", "followers_url": "https://api.github.com/users/DamienMatias/followers", "following_url": "https://api.github.com/users/DamienMatias/following{/other_user}", "gists_url": "https://api.github.com/users/DamienMatias/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/DamienMatias", "id": 23298479, "login": "DamienMatias", "node_id": "MDQ6VXNlcjIzMjk4NDc5", "organizations_url": "https://api.github.com/users/DamienMatias/orgs", "received_events_url": "https://api.github.com/users/DamienMatias/received_events", "repos_url": "https://api.github.com/users/DamienMatias/repos", "site_admin": false, "starred_url": "https://api.github.com/users/DamienMatias/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DamienMatias/subscriptions", "type": "User", "url": "https://api.github.com/users/DamienMatias", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-02-24T17:03:43Z
2025-02-24T17:03:43Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Hello, I'm trying to convert an HF dataset into a TFRecord, so I'm importing `tensorflow` and `datasets` to do so. Depending on the order in which I import those libraries, my code hangs forever and is unkillable (CTRL+C doesn't work, I need to kill my shell entirely). Thank you for your help 🙏 ### Steps to reproduce the bug If you run the following script, it will hang forever: ```python import tensorflow as tf import datasets dataset = datasets.load_dataset("imagenet-1k", split="validation", streaming=True) print(next(iter(dataset))) ``` However, running the following works fine (I just changed the order of the imports): ```python import datasets import tensorflow as tf dataset = datasets.load_dataset("imagenet-1k", split="validation", streaming=True) print(next(iter(dataset))) ``` ### Expected behavior I'm expecting the script to reach the end and, in my case, print the content of the first item in the dataset ``` {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=408x500 at 0x70C646A03110>, 'label': 91} ``` ### Environment info ``` $ datasets-cli env - `datasets` version: 3.3.2 - Platform: Linux-6.8.0-1017-aws-x86_64-with-glibc2.35 - Python version: 3.11.7 - `huggingface_hub` version: 0.29.1 - PyArrow version: 19.0.1 - Pandas version: 2.2.3 - `fsspec` version: 2024.12.0 ``` I'm also using `tensorflow==2.18.0`.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7419/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7419/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7418
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7418/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7418/comments
https://api.github.com/repos/huggingface/datasets/issues/7418/events
https://github.com/huggingface/datasets/issues/7418
2,868,701,471
I_kwDODunzps6q_Okf
7,418
pyarrow.lib.ArrowInvalid: cannot mix list and non-list, non-null values with map function
{ "avatar_url": "https://avatars.githubusercontent.com/u/15705569?v=4", "events_url": "https://api.github.com/users/alexxchen/events{/privacy}", "followers_url": "https://api.github.com/users/alexxchen/followers", "following_url": "https://api.github.com/users/alexxchen/following{/other_user}", "gists_url": "https://api.github.com/users/alexxchen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/alexxchen", "id": 15705569, "login": "alexxchen", "node_id": "MDQ6VXNlcjE1NzA1NTY5", "organizations_url": "https://api.github.com/users/alexxchen/orgs", "received_events_url": "https://api.github.com/users/alexxchen/received_events", "repos_url": "https://api.github.com/users/alexxchen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/alexxchen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alexxchen/subscriptions", "type": "User", "url": "https://api.github.com/users/alexxchen", "user_view_type": "public" }
[]
open
false
null
[]
null
4
2025-02-21T10:58:06Z
2025-02-25T15:26:46Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I encounter a pyarrow.lib.ArrowInvalid error with the map function on some examples when loading the dataset ### Steps to reproduce the bug ``` from datasets import load_dataset from PIL import Image, PngImagePlugin dataset = load_dataset("leonardPKU/GEOQA_R1V_Train_8K") system_prompt="You are a helpful AI Assistant" def make_conversation(example): prompt = [] prompt.append({"role": "system", "content": system_prompt}) prompt.append( { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": example["problem"]}, ] } ) return {"prompt": prompt} def check_data_types(example): for key, value in example.items(): if key == 'image': if not isinstance(value, PngImagePlugin.PngImageFile): print(value) if key == "problem" or key == "solution": if not isinstance(value, str): print(value) return example dataset = dataset.map(check_data_types) dataset = dataset.map(make_conversation) ``` ### Expected behavior The dataset is processed successfully with map ### Environment info datasets==3.3.1
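One possible workaround, assuming the ArrowInvalid comes from the system message storing "content" as a plain string while the user message stores it as a list (so the "prompt" column mixes list and non-list values): give every message the same list-of-parts shape. This is a sketch reusing the `system_prompt` and `dataset` variables from the reproduction above, not a confirmed fix:

```python
def make_conversation(example):
    prompt = [
        # The system content is also a list of typed parts, so every "content"
        # value in the column has the same Arrow type.
        {"role": "system", "content": [{"type": "text", "text": system_prompt}]},
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": example["problem"]},
            ],
        },
    ]
    return {"prompt": prompt}

dataset = dataset.map(make_conversation)
```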
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7418/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7418/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7417
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7417/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7417/comments
https://api.github.com/repos/huggingface/datasets/issues/7417/events
https://github.com/huggingface/datasets/pull/7417
2,866,868,922
PR_kwDODunzps6L78k3
7,417
set dev version
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-20T17:45:29Z
2025-02-20T17:47:50Z
2025-02-20T17:45:36Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7417/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7417/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7417.diff", "html_url": "https://github.com/huggingface/datasets/pull/7417", "merged_at": "2025-02-20T17:45:36Z", "patch_url": "https://github.com/huggingface/datasets/pull/7417.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7417" }
https://api.github.com/repos/huggingface/datasets/issues/7416
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7416/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7416/comments
https://api.github.com/repos/huggingface/datasets/issues/7416/events
https://github.com/huggingface/datasets/pull/7416
2,866,862,143
PR_kwDODunzps6L77G2
7,416
Release: 3.3.2
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-20T17:42:11Z
2025-02-20T17:44:35Z
2025-02-20T17:43:28Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7416/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7416/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7416.diff", "html_url": "https://github.com/huggingface/datasets/pull/7416", "merged_at": "2025-02-20T17:43:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/7416.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7416" }
https://api.github.com/repos/huggingface/datasets/issues/7415
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7415/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7415/comments
https://api.github.com/repos/huggingface/datasets/issues/7415/events
https://github.com/huggingface/datasets/issues/7415
2,865,774,546
I_kwDODunzps6q0D_S
7,415
Shard Dataset at specific indices
{ "avatar_url": "https://avatars.githubusercontent.com/u/11044035?v=4", "events_url": "https://api.github.com/users/nikonikolov/events{/privacy}", "followers_url": "https://api.github.com/users/nikonikolov/followers", "following_url": "https://api.github.com/users/nikonikolov/following{/other_user}", "gists_url": "https://api.github.com/users/nikonikolov/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nikonikolov", "id": 11044035, "login": "nikonikolov", "node_id": "MDQ6VXNlcjExMDQ0MDM1", "organizations_url": "https://api.github.com/users/nikonikolov/orgs", "received_events_url": "https://api.github.com/users/nikonikolov/received_events", "repos_url": "https://api.github.com/users/nikonikolov/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nikonikolov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nikonikolov/subscriptions", "type": "User", "url": "https://api.github.com/users/nikonikolov", "user_view_type": "public" }
[]
open
false
null
[]
null
3
2025-02-20T10:43:10Z
2025-02-24T11:06:45Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
I have a dataset of sequences, where each example in the sequence is a separate row in the dataset (similar to LeRobotDataset). When running `Dataset.save_to_disk`, how can I provide indices at which the dataset may be sharded, so that no episode spans more than one shard? Consequently, when I run `Dataset.load_from_disk`, how can I load just a subset of the shards to save memory and time on different ranks? I guess an alternative to this would be: given a loaded `Dataset`, how can I run `Dataset.shard` such that sharding doesn't split any episode across shards?
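There is no episode-aware shard option built into `Dataset.shard` as far as I know, but a rough sketch of one way to approximate it with `Dataset.select` is below. The `episode_index` column name mirrors the LeRobot-style layout mentioned above and is an assumption; each yielded piece could then be written out separately with `save_to_disk` and loaded per rank:

```python
import numpy as np
from datasets import Dataset


def split_on_episode_boundaries(dataset: Dataset, num_shards: int):
    """Yield contiguous sub-datasets whose cut points always fall between episodes."""
    episode_ids = np.asarray(dataset["episode_index"])  # assumed column holding the episode id per row
    # Row index of the first frame of each episode, plus the end of the table.
    starts = np.flatnonzero(np.diff(episode_ids, prepend=episode_ids[0] - 1))
    boundaries = np.append(starts, len(dataset))
    # Choose num_shards + 1 cut points from the episode boundaries.
    cuts = boundaries[np.linspace(0, len(boundaries) - 1, num_shards + 1).astype(int)]
    for lo, hi in zip(cuts[:-1], cuts[1:]):
        yield dataset.select(range(int(lo), int(hi)))
```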
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7415/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7415/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7414
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7414/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7414/comments
https://api.github.com/repos/huggingface/datasets/issues/7414/events
https://github.com/huggingface/datasets/pull/7414
2,863,798,756
PR_kwDODunzps6LxjsH
7,414
Gracefully cancel async tasks
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-19T16:10:58Z
2025-02-20T14:12:26Z
2025-02-20T14:12:23Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7414/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7414/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7414.diff", "html_url": "https://github.com/huggingface/datasets/pull/7414", "merged_at": "2025-02-20T14:12:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/7414.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7414" }
https://api.github.com/repos/huggingface/datasets/issues/7413
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7413/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7413/comments
https://api.github.com/repos/huggingface/datasets/issues/7413/events
https://github.com/huggingface/datasets/issues/7413
2,860,947,582
I_kwDODunzps6qhph-
7,413
Documentation on multiple media files of the same type with WebDataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/3616964?v=4", "events_url": "https://api.github.com/users/DCNemesis/events{/privacy}", "followers_url": "https://api.github.com/users/DCNemesis/followers", "following_url": "https://api.github.com/users/DCNemesis/following{/other_user}", "gists_url": "https://api.github.com/users/DCNemesis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/DCNemesis", "id": 3616964, "login": "DCNemesis", "node_id": "MDQ6VXNlcjM2MTY5NjQ=", "organizations_url": "https://api.github.com/users/DCNemesis/orgs", "received_events_url": "https://api.github.com/users/DCNemesis/received_events", "repos_url": "https://api.github.com/users/DCNemesis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/DCNemesis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DCNemesis/subscriptions", "type": "User", "url": "https://api.github.com/users/DCNemesis", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-02-18T16:13:20Z
2025-02-20T14:17:54Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
The [current documentation](https://huggingface.co/docs/datasets/en/video_dataset) on creating a video dataset includes only examples with one media file and one json. It would be useful to have examples where multiple files of the same type are included. For example, in a sign language dataset, you may have a base video and a video annotation of the extracted pose. According to the WebDataset documentation, this should be possible with period-separated filenames. For example: ```e39871fd9fd74f55.base.mp4 e39871fd9fd74f55.pose.mp4 e39871fd9fd74f55.json f18b91585c4d3f3e.base.mp4 f18b91585c4d3f3e.pose.mp4 f18b91585c4d3f3e.json ... ``` If you can confirm that this method of including multiple media files works with huggingface datasets and include an example in the documentation, I'd appreciate it.
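A hedged sketch of what loading such shards might look like with the packaged `webdataset` builder, assuming it follows the WebDataset convention of splitting the basename at the first period. The tar path, split name, and resulting column names are all assumptions that would need confirming, which is exactly what this issue asks for:

```python
from datasets import load_dataset

# Assumes tar shards such as data/train-000000.tar containing
# e39871fd9fd74f55.base.mp4, e39871fd9fd74f55.pose.mp4, e39871fd9fd74f55.json, ...
dataset = load_dataset("webdataset", data_files={"train": "data/train-*.tar"}, split="train", streaming=True)

example = next(iter(dataset))
base_video = example["base.mp4"]   # assumed column name derived from the suffix after the first dot
pose_video = example["pose.mp4"]
metadata = example["json"]
```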
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7413/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7413/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7412
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7412/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7412/comments
https://api.github.com/repos/huggingface/datasets/issues/7412/events
https://github.com/huggingface/datasets/issues/7412
2,859,433,710
I_kwDODunzps6qb37u
7,412
IndexError: Invalid Key is out of bounds for size 0 for code-search-net/code_search_net dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/56113657?v=4", "events_url": "https://api.github.com/users/harshakhmk/events{/privacy}", "followers_url": "https://api.github.com/users/harshakhmk/followers", "following_url": "https://api.github.com/users/harshakhmk/following{/other_user}", "gists_url": "https://api.github.com/users/harshakhmk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/harshakhmk", "id": 56113657, "login": "harshakhmk", "node_id": "MDQ6VXNlcjU2MTEzNjU3", "organizations_url": "https://api.github.com/users/harshakhmk/orgs", "received_events_url": "https://api.github.com/users/harshakhmk/received_events", "repos_url": "https://api.github.com/users/harshakhmk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/harshakhmk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/harshakhmk/subscriptions", "type": "User", "url": "https://api.github.com/users/harshakhmk", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-02-18T05:58:33Z
2025-02-18T06:42:07Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I am trying to do model pruning on sentence-transformers/all-mini-L6-v2 for the code-search-net/code_search_net dataset using the INCTrainer class. However, I am getting the error below ``` raise IndexError(f"Invalid key: {key} is out of bounds for size {size}") IndexError: Invalid key: 1840208 is out of bounds for size 0 ``` ### Steps to reproduce the bug Model pruning on the above dataset using the guide below https://huggingface.co/docs/optimum/en/intel/neural_compressor/optimization#pruning ### Expected behavior The model should be successfully pruned ### Environment info Torch version: 2.4.1 Python version: 3.8.10
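For what it is worth, this shape of error ("Invalid key: N is out of bounds for size 0") often appears when the trainer's column pruning removes every column from the dataset before the sampler indexes into it. A hedged sketch of the usual mitigation, assuming INCTrainer accepts the same argument as the regular transformers Trainer:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./pruned-model",       # hypothetical path
    # Keep columns that the model's forward() does not consume, so the
    # dataset is not emptied out before indexing.
    remove_unused_columns=False,
)
```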
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7412/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7412/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7411
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7411/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7411/comments
https://api.github.com/repos/huggingface/datasets/issues/7411/events
https://github.com/huggingface/datasets/pull/7411
2,858,993,390
PR_kwDODunzps6LhV0Z
7,411
Attempt to fix multiprocessing hang by closing and joining the pool before termination
{ "avatar_url": "https://avatars.githubusercontent.com/u/43149077?v=4", "events_url": "https://api.github.com/users/dakinggg/events{/privacy}", "followers_url": "https://api.github.com/users/dakinggg/followers", "following_url": "https://api.github.com/users/dakinggg/following{/other_user}", "gists_url": "https://api.github.com/users/dakinggg/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dakinggg", "id": 43149077, "login": "dakinggg", "node_id": "MDQ6VXNlcjQzMTQ5MDc3", "organizations_url": "https://api.github.com/users/dakinggg/orgs", "received_events_url": "https://api.github.com/users/dakinggg/received_events", "repos_url": "https://api.github.com/users/dakinggg/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dakinggg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dakinggg/subscriptions", "type": "User", "url": "https://api.github.com/users/dakinggg", "user_view_type": "public" }
[]
closed
false
null
[]
null
3
2025-02-17T23:58:03Z
2025-02-19T21:11:24Z
2025-02-19T13:40:32Z
CONTRIBUTOR
null
null
null
https://github.com/huggingface/datasets/issues/6393 has plagued me on and off for a very long time. I have had various workarounds (one time combining two filter calls into one filter call removed the issue, another time making rank 0 go first resolved a cache race condition, one time I think upgrading the version of something resolved it). I don't know hf datasets well enough to fully understand the root cause, but I _think_ this PR fixes it. Evidence: I have an LLM Foundry training yaml/script (datasets version 3.2.0) that results in a hang ~1/10 times (for a baseline for this testing, it was 2/36 runs that hung). I also reran with the latest datasets version (3.3.1) and got 4/36 hung. Installing datasets from this PR, I was able to successfully run the script 144 times without a hang occurring. Assuming the base probability is 1/10, this should be more than enough times to have confidence it works. After adding some logging, I could see that the code hung during the __exit__ of the mp pool context manager, after all shards had been processed and the tqdm context manager had exited. My best explanation: when multiprocessing pool __exit__ is called, it calls pool.terminate, which forcefully exits all the processes (and calls code related to this that I haven't looked at closely). I'm guessing this forceful termination has a bad interaction with some multithreading/multiprocessing that hf datasets does. If we instead call pool.close and pool.join before the pool.terminate happens, perhaps whatever that bad interaction is can complete gracefully, and then the terminate call proceeds without issue. If this PR seems good to you, I'd be very appreciative if you were able to do a patch release including it. Thank you! @lhoestq
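A minimal sketch of the ordering this PR describes (not the actual datasets code, just the shape of the fix): let the workers finish and exit cleanly via close() and join() before the context manager's __exit__ calls terminate():

```python
import multiprocessing


def process(shard_id):
    return shard_id * 2  # stand-in for per-shard work


if __name__ == "__main__":
    with multiprocessing.Pool(4) as pool:
        results = pool.map(process, range(16))
        # Drain and join the workers first, so the terminate() issued by
        # Pool.__exit__ hits an already-stopped pool instead of killing
        # processes mid-cleanup.
        pool.close()
        pool.join()
    print(results)
```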
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7411/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7411/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7411.diff", "html_url": "https://github.com/huggingface/datasets/pull/7411", "merged_at": "2025-02-19T13:40:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/7411.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7411" }
https://api.github.com/repos/huggingface/datasets/issues/7410
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7410/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7410/comments
https://api.github.com/repos/huggingface/datasets/issues/7410/events
https://github.com/huggingface/datasets/pull/7410
2,858,085,707
PR_kwDODunzps6LeQBF
7,410
Set dev version
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-17T14:54:39Z
2025-02-17T14:56:58Z
2025-02-17T14:54:56Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7410/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7410/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7410.diff", "html_url": "https://github.com/huggingface/datasets/pull/7410", "merged_at": "2025-02-17T14:54:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/7410.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7410" }
https://api.github.com/repos/huggingface/datasets/issues/7409
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7409/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7409/comments
https://api.github.com/repos/huggingface/datasets/issues/7409/events
https://github.com/huggingface/datasets/pull/7409
2,858,079,508
PR_kwDODunzps6LeOpY
7,409
Release: 3.3.1
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-17T14:52:12Z
2025-02-17T14:54:32Z
2025-02-17T14:53:13Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7409/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7409/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7409.diff", "html_url": "https://github.com/huggingface/datasets/pull/7409", "merged_at": "2025-02-17T14:53:13Z", "patch_url": "https://github.com/huggingface/datasets/pull/7409.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7409" }
https://api.github.com/repos/huggingface/datasets/issues/7408
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7408/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7408/comments
https://api.github.com/repos/huggingface/datasets/issues/7408/events
https://github.com/huggingface/datasets/pull/7408
2,858,012,313
PR_kwDODunzps6Ld_-m
7,408
Fix filter speed regression
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-17T14:25:32Z
2025-02-17T14:28:48Z
2025-02-17T14:28:46Z
MEMBER
null
null
null
close https://github.com/huggingface/datasets/issues/7404
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7408/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7408/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7408.diff", "html_url": "https://github.com/huggingface/datasets/pull/7408", "merged_at": "2025-02-17T14:28:46Z", "patch_url": "https://github.com/huggingface/datasets/pull/7408.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7408" }
https://api.github.com/repos/huggingface/datasets/issues/7407
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7407/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7407/comments
https://api.github.com/repos/huggingface/datasets/issues/7407/events
https://github.com/huggingface/datasets/pull/7407
2,856,517,442
PR_kwDODunzps6LY7y5
7,407
Update use_with_pandas.mdx: to_pandas() correction in last section
{ "avatar_url": "https://avatars.githubusercontent.com/u/7552335?v=4", "events_url": "https://api.github.com/users/ibarrien/events{/privacy}", "followers_url": "https://api.github.com/users/ibarrien/followers", "following_url": "https://api.github.com/users/ibarrien/following{/other_user}", "gists_url": "https://api.github.com/users/ibarrien/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ibarrien", "id": 7552335, "login": "ibarrien", "node_id": "MDQ6VXNlcjc1NTIzMzU=", "organizations_url": "https://api.github.com/users/ibarrien/orgs", "received_events_url": "https://api.github.com/users/ibarrien/received_events", "repos_url": "https://api.github.com/users/ibarrien/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ibarrien/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ibarrien/subscriptions", "type": "User", "url": "https://api.github.com/users/ibarrien", "user_view_type": "public" }
[]
closed
false
null
[]
null
0
2025-02-17T01:53:31Z
2025-02-20T17:28:04Z
2025-02-20T17:28:04Z
CONTRIBUTOR
null
null
null
last section ``to_pandas()``
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7407/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7407/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7407.diff", "html_url": "https://github.com/huggingface/datasets/pull/7407", "merged_at": "2025-02-20T17:28:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/7407.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7407" }
https://api.github.com/repos/huggingface/datasets/issues/7406
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7406/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7406/comments
https://api.github.com/repos/huggingface/datasets/issues/7406/events
https://github.com/huggingface/datasets/issues/7406
2,856,441,206
I_kwDODunzps6qQdV2
7,406
Adding Core Maintainer List to CONTRIBUTING.md
{ "avatar_url": "https://avatars.githubusercontent.com/u/93233241?v=4", "events_url": "https://api.github.com/users/jp1924/events{/privacy}", "followers_url": "https://api.github.com/users/jp1924/followers", "following_url": "https://api.github.com/users/jp1924/following{/other_user}", "gists_url": "https://api.github.com/users/jp1924/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jp1924", "id": 93233241, "login": "jp1924", "node_id": "U_kgDOBY6gWQ", "organizations_url": "https://api.github.com/users/jp1924/orgs", "received_events_url": "https://api.github.com/users/jp1924/received_events", "repos_url": "https://api.github.com/users/jp1924/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jp1924/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jp1924/subscriptions", "type": "User", "url": "https://api.github.com/users/jp1924", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
null
3
2025-02-17T00:32:40Z
2025-03-24T10:57:54Z
2025-03-24T10:57:54Z
CONTRIBUTOR
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request I propose adding a core maintainer list to the `CONTRIBUTING.md` file. ### Motivation The Transformers and Liger-Kernel projects maintain lists of core maintainers for each module. However, the Datasets project doesn't have such a list. ### Your contribution I have nothing to add here.
{ "avatar_url": "https://avatars.githubusercontent.com/u/93233241?v=4", "events_url": "https://api.github.com/users/jp1924/events{/privacy}", "followers_url": "https://api.github.com/users/jp1924/followers", "following_url": "https://api.github.com/users/jp1924/following{/other_user}", "gists_url": "https://api.github.com/users/jp1924/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jp1924", "id": 93233241, "login": "jp1924", "node_id": "U_kgDOBY6gWQ", "organizations_url": "https://api.github.com/users/jp1924/orgs", "received_events_url": "https://api.github.com/users/jp1924/received_events", "repos_url": "https://api.github.com/users/jp1924/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jp1924/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jp1924/subscriptions", "type": "User", "url": "https://api.github.com/users/jp1924", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7406/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7406/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7405
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7405/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7405/comments
https://api.github.com/repos/huggingface/datasets/issues/7405/events
https://github.com/huggingface/datasets/issues/7405
2,856,372,814
I_kwDODunzps6qQMpO
7,405
Lazy loading of environment variables
{ "avatar_url": "https://avatars.githubusercontent.com/u/7225987?v=4", "events_url": "https://api.github.com/users/nikvaessen/events{/privacy}", "followers_url": "https://api.github.com/users/nikvaessen/followers", "following_url": "https://api.github.com/users/nikvaessen/following{/other_user}", "gists_url": "https://api.github.com/users/nikvaessen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nikvaessen", "id": 7225987, "login": "nikvaessen", "node_id": "MDQ6VXNlcjcyMjU5ODc=", "organizations_url": "https://api.github.com/users/nikvaessen/orgs", "received_events_url": "https://api.github.com/users/nikvaessen/received_events", "repos_url": "https://api.github.com/users/nikvaessen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nikvaessen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nikvaessen/subscriptions", "type": "User", "url": "https://api.github.com/users/nikvaessen", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-02-16T22:31:41Z
2025-02-17T15:17:18Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Loading a `.env` file after an `import datasets` call does not correctly use the environment variables. This is due to the fact that environment variables are read at import time: https://github.com/huggingface/datasets/blob/de062f0552a810c52077543c1169c38c1f0c53fc/src/datasets/config.py#L155C1-L155C80 ### Steps to reproduce the bug ```bash # make tmp dir mkdir -p /tmp/debug-env # make .env file echo HF_HOME=/tmp/debug-env/data > /tmp/debug-env/.env # first load dotenv, downloads to /tmp/debug-env/data uv run --with datasets,python-dotenv python3 -c \ 'import dotenv; dotenv.load_dotenv("/tmp/debug-env/.env"); import datasets; datasets.load_dataset("Anthropic/hh-rlhf")' # first import datasets, downloads to `~/.cache/huggingface` uv run --with datasets,python-dotenv python3 -c \ 'import datasets; import dotenv; dotenv.load_dotenv("/tmp/debug-env/.env"); datasets.load_dataset("Anthropic/hh-rlhf")' ``` ### Expected behavior I expect that setting environment variables with something like this: ```python3 if __name__ == "__main__": load_dotenv() main() ``` works correctly. ### Environment info "datasets>=3.3.0",
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7405/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7405/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7404
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7404/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7404/comments
https://api.github.com/repos/huggingface/datasets/issues/7404/events
https://github.com/huggingface/datasets/issues/7404
2,856,366,207
I_kwDODunzps6qQLB_
7,404
Performance regression in `dataset.filter`
{ "avatar_url": "https://avatars.githubusercontent.com/u/82200?v=4", "events_url": "https://api.github.com/users/ttim/events{/privacy}", "followers_url": "https://api.github.com/users/ttim/followers", "following_url": "https://api.github.com/users/ttim/following{/other_user}", "gists_url": "https://api.github.com/users/ttim/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ttim", "id": 82200, "login": "ttim", "node_id": "MDQ6VXNlcjgyMjAw", "organizations_url": "https://api.github.com/users/ttim/orgs", "received_events_url": "https://api.github.com/users/ttim/received_events", "repos_url": "https://api.github.com/users/ttim/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ttim/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ttim/subscriptions", "type": "User", "url": "https://api.github.com/users/ttim", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
null
3
2025-02-16T22:19:14Z
2025-02-17T17:46:06Z
2025-02-17T14:28:48Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug We're filtering dataset of ~1M (small-ish) records. At some point in the code we do `dataset.filter`, before (including 3.2.0) it was taking couple of seconds, and now it takes 4 hours. We use 16 threads/workers, and stack trace at them look as follows: ``` Traceback (most recent call last): File "/python/lib/python3.12/site-packages/multiprocess/process.py", line 314, in _bootstrap self.run() File "/python/lib/python3.12/site-packages/multiprocess/process.py", line 108, in run self._target(*self._args, **self._kwargs) File "/python/lib/python3.12/site-packages/multiprocess/pool.py", line 125, in worker result = (True, func(*args, **kwds)) ^^^^^^^^^^^^^^^^^^^ File "/python/lib/python3.12/site-packages/datasets/utils/py_utils.py", line 678, in _write_generator_to_queue for i, result in enumerate(func(**kwargs)): File "/python/lib/python3.12/site-packages/datasets/arrow_dataset.py", line 3511, in _map_single for i, batch in iter_outputs(shard_iterable): File "/python/lib/python3.12/site-packages/datasets/arrow_dataset.py", line 3461, in iter_outputs yield i, apply_function(example, i, offset=offset) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/python/lib/python3.12/site-packages/datasets/arrow_dataset.py", line 3390, in apply_function processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/python/lib/python3.12/site-packages/datasets/arrow_dataset.py", line 6416, in get_indices_from_mask_function indices_array = indices_mapping.column(0).take(indices_array) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "pyarrow/table.pxi", line 1079, in pyarrow.lib.ChunkedArray.take File "/python/lib/python3.12/site-packages/pyarrow/compute.py", line 458, in take def take(data, indices, *, boundscheck=True, memory_pool=None): ``` ### Steps to reproduce the bug 1. Save dataset of 1M records in arrow 2. Filter it with 16 threads 3. Watch it take too long ### Expected behavior Filtering done fast ### Environment info datasets 3.3.0, python 3.12
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7404/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7404/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7402
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7402/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7402/comments
https://api.github.com/repos/huggingface/datasets/issues/7402/events
https://github.com/huggingface/datasets/pull/7402
2,855,880,858
PR_kwDODunzps6LW8G3
7,402
Fix a typo in arrow_dataset.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/7996256?v=4", "events_url": "https://api.github.com/users/jingedawang/events{/privacy}", "followers_url": "https://api.github.com/users/jingedawang/followers", "following_url": "https://api.github.com/users/jingedawang/following{/other_user}", "gists_url": "https://api.github.com/users/jingedawang/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jingedawang", "id": 7996256, "login": "jingedawang", "node_id": "MDQ6VXNlcjc5OTYyNTY=", "organizations_url": "https://api.github.com/users/jingedawang/orgs", "received_events_url": "https://api.github.com/users/jingedawang/received_events", "repos_url": "https://api.github.com/users/jingedawang/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jingedawang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jingedawang/subscriptions", "type": "User", "url": "https://api.github.com/users/jingedawang", "user_view_type": "public" }
[]
closed
false
null
[]
null
0
2025-02-16T04:52:02Z
2025-02-20T17:29:28Z
2025-02-20T17:29:28Z
CONTRIBUTOR
null
null
null
"in the feature" should be "in the future"
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7402/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7402/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7402.diff", "html_url": "https://github.com/huggingface/datasets/pull/7402", "merged_at": "2025-02-20T17:29:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/7402.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7402" }
https://api.github.com/repos/huggingface/datasets/issues/7401
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7401/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7401/comments
https://api.github.com/repos/huggingface/datasets/issues/7401/events
https://github.com/huggingface/datasets/pull/7401
2,853,260,869
PR_kwDODunzps6LOMSo
7,401
set dev version
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-14T10:17:03Z
2025-02-14T10:19:20Z
2025-02-14T10:17:13Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7401/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7401/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7401.diff", "html_url": "https://github.com/huggingface/datasets/pull/7401", "merged_at": "2025-02-14T10:17:13Z", "patch_url": "https://github.com/huggingface/datasets/pull/7401.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7401" }
https://api.github.com/repos/huggingface/datasets/issues/7399
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7399/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7399/comments
https://api.github.com/repos/huggingface/datasets/issues/7399/events
https://github.com/huggingface/datasets/issues/7399
2,853,098,442
I_kwDODunzps6qDtPK
7,399
Synchronize parameters for various datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/7976840?v=4", "events_url": "https://api.github.com/users/grofte/events{/privacy}", "followers_url": "https://api.github.com/users/grofte/followers", "following_url": "https://api.github.com/users/grofte/following{/other_user}", "gists_url": "https://api.github.com/users/grofte/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/grofte", "id": 7976840, "login": "grofte", "node_id": "MDQ6VXNlcjc5NzY4NDA=", "organizations_url": "https://api.github.com/users/grofte/orgs", "received_events_url": "https://api.github.com/users/grofte/received_events", "repos_url": "https://api.github.com/users/grofte/repos", "site_admin": false, "starred_url": "https://api.github.com/users/grofte/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/grofte/subscriptions", "type": "User", "url": "https://api.github.com/users/grofte", "user_view_type": "public" }
[]
open
false
null
[]
null
2
2025-02-14T09:15:11Z
2025-02-19T11:50:29Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug [IterableDatasetDict](https://huggingface.co/docs/datasets/v3.2.0/en/package_reference/main_classes#datasets.IterableDatasetDict.map) map function is missing the `desc` parameter. You can see the equivalent map function for [Dataset here](https://huggingface.co/docs/datasets/v3.2.0/en/package_reference/main_classes#datasets.Dataset.map). There might be other parameters missing - I haven't checked. ### Steps to reproduce the bug from datasets import Dataset, IterableDataset, IterableDatasetDict ds = IterableDatasetDict({"train": Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3), "validate": Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3)}) for d in ds["train"]: print(d) ds = ds.map(lambda x: {k: v+1 for k, v in x.items()}, desc="increment") for d in ds["train"]: print(d) ### Expected behavior The description parameter should be available for all datasets (or none). ### Environment info - `datasets` version: 3.2.0 - Platform: Linux-6.1.85+-x86_64-with-glibc2.35 - Python version: 3.11.11 - `huggingface_hub` version: 0.28.1 - PyArrow version: 17.0.0 - Pandas version: 2.2.2 - `fsspec` version: 2024.9.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7399/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7399/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7398
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7398/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7398/comments
https://api.github.com/repos/huggingface/datasets/issues/7398/events
https://github.com/huggingface/datasets/pull/7398
2,853,097,869
PR_kwDODunzps6LNoDk
7,398
Release: 3.3.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-14T09:15:03Z
2025-02-14T09:57:39Z
2025-02-14T09:57:37Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7398/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7398/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7398.diff", "html_url": "https://github.com/huggingface/datasets/pull/7398", "merged_at": "2025-02-14T09:57:37Z", "patch_url": "https://github.com/huggingface/datasets/pull/7398.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7398" }
https://api.github.com/repos/huggingface/datasets/issues/7397
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7397/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7397/comments
https://api.github.com/repos/huggingface/datasets/issues/7397/events
https://github.com/huggingface/datasets/pull/7397
2,852,829,763
PR_kwDODunzps6LMuQD
7,397
Kannada dataset (Conversations, Wikipedia, etc.)
{ "avatar_url": "https://avatars.githubusercontent.com/u/146451281?v=4", "events_url": "https://api.github.com/users/Likhith2612/events{/privacy}", "followers_url": "https://api.github.com/users/Likhith2612/followers", "following_url": "https://api.github.com/users/Likhith2612/following{/other_user}", "gists_url": "https://api.github.com/users/Likhith2612/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Likhith2612", "id": 146451281, "login": "Likhith2612", "node_id": "U_kgDOCLqrUQ", "organizations_url": "https://api.github.com/users/Likhith2612/orgs", "received_events_url": "https://api.github.com/users/Likhith2612/received_events", "repos_url": "https://api.github.com/users/Likhith2612/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Likhith2612/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Likhith2612/subscriptions", "type": "User", "url": "https://api.github.com/users/Likhith2612", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-14T06:53:03Z
2025-02-20T17:28:54Z
2025-02-20T17:28:53Z
NONE
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7397/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7397/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7397.diff", "html_url": "https://github.com/huggingface/datasets/pull/7397", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7397.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7397" }
https://api.github.com/repos/huggingface/datasets/issues/7400
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7400/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7400/comments
https://api.github.com/repos/huggingface/datasets/issues/7400/events
https://github.com/huggingface/datasets/issues/7400
2,853,201,277
I_kwDODunzps6qEGV9
7,400
504 Gateway Timeout when uploading large dataset to Hugging Face Hub
{ "avatar_url": "https://avatars.githubusercontent.com/u/3500?v=4", "events_url": "https://api.github.com/users/hotchpotch/events{/privacy}", "followers_url": "https://api.github.com/users/hotchpotch/followers", "following_url": "https://api.github.com/users/hotchpotch/following{/other_user}", "gists_url": "https://api.github.com/users/hotchpotch/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hotchpotch", "id": 3500, "login": "hotchpotch", "node_id": "MDQ6VXNlcjM1MDA=", "organizations_url": "https://api.github.com/users/hotchpotch/orgs", "received_events_url": "https://api.github.com/users/hotchpotch/received_events", "repos_url": "https://api.github.com/users/hotchpotch/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hotchpotch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hotchpotch/subscriptions", "type": "User", "url": "https://api.github.com/users/hotchpotch", "user_view_type": "public" }
[]
open
false
null
[]
null
4
2025-02-14T02:18:35Z
2025-02-14T23:48:36Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Description I encountered consistent 504 Gateway Timeout errors while attempting to upload a large dataset (approximately 500GB) to the Hugging Face Hub. The upload fails during the process with a Gateway Timeout error. I will continue trying to upload. While it might succeed in future attempts, I wanted to report this issue in the meantime. ### Reproduction - I attempted the upload 3 times - Each attempt resulted in the same 504 error during the upload process (not at the start, but in the middle of the upload) - Using `dataset.push_to_hub()` method ### Environment Information ``` - huggingface_hub version: 0.28.0 - Platform: Linux-6.8.0-52-generic-x86_64-with-glibc2.39 - Python version: 3.11.10 - Running in iPython ?: No - Running in notebook ?: No - Running in Google Colab ?: No - Running in Google Colab Enterprise ?: No - Token path ?: /home/hotchpotch/.cache/huggingface/token - Has saved token ?: True - Who am I ?: hotchpotch - Configured git credential helpers: store - FastAI: N/A - Tensorflow: N/A - Torch: 2.5.1 - Jinja2: 3.1.5 - Graphviz: N/A - keras: N/A - Pydot: N/A - Pillow: 10.4.0 - hf_transfer: N/A - gradio: N/A - tensorboard: N/A - numpy: 1.26.4 - pydantic: 2.10.6 - aiohttp: 3.11.11 - ENDPOINT: https://huggingface.co - HF_HUB_CACHE: /home/hotchpotch/.cache/huggingface/hub - HF_ASSETS_CACHE: /home/hotchpotch/.cache/huggingface/assets - HF_TOKEN_PATH: /home/hotchpotch/.cache/huggingface/token - HF_STORED_TOKENS_PATH: /home/hotchpotch/.cache/huggingface/stored_tokens - HF_HUB_OFFLINE: False - HF_HUB_DISABLE_TELEMETRY: False - HF_HUB_DISABLE_PROGRESS_BARS: None - HF_HUB_DISABLE_SYMLINKS_WARNING: False - HF_HUB_DISABLE_EXPERIMENTAL_WARNING: False - HF_HUB_DISABLE_IMPLICIT_TOKEN: False - HF_HUB_ENABLE_HF_TRANSFER: False - HF_HUB_ETAG_TIMEOUT: 10 - HF_HUB_DOWNLOAD_TIMEOUT: 10 ``` ### Full Error Traceback ```python Traceback (most recent call last): File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_http.py", line 406, in hf_raise_for_status response.raise_for_status() File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/requests/models.py", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 504 Server Error: Gateway Time-out for url: https://huggingface.co/datasets/hotchpotch/fineweb-2-edu-japanese.git/info/lfs/objects/batch The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/create_edu_japanese_ds/upload_edu_japanese_ds.py", line 12, in <module> ds.push_to_hub("hotchpotch/fineweb-2-edu-japanese", private=True) File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/datasets/dataset_dict.py", line 1665, in push_to_hub split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/datasets/arrow_dataset.py", line 5301, in _push_parquet_shards_to_hub api.preupload_lfs_files( File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/huggingface_hub/hf_api.py", line 4215, in preupload_lfs_files _upload_lfs_files( File 
"/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn return fn(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^ File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/huggingface_hub/_commit_api.py", line 395, in _upload_lfs_files batch_actions_chunk, batch_errors_chunk = post_lfs_batch_info( ^^^^^^^^^^^^^^^^^^^^ File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn return fn(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^ File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/huggingface_hub/lfs.py", line 168, in post_lfs_batch_info hf_raise_for_status(resp) File "/home/hotchpotch/src/github.com/hotchpotch/fineweb-2-edu-classifier-japanese/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_http.py", line 477, in hf_raise_for_status raise _format(HfHubHTTPError, str(e), response) from e huggingface_hub.errors.HfHubHTTPError: 504 Server Error: Gateway Time-out for url: https://huggingface.co/datasets/hotchpotch/fineweb-2-edu-japanese.git/info/lfs/objects/batch ```
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7400/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7400/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7396
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7396/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7396/comments
https://api.github.com/repos/huggingface/datasets/issues/7396/events
https://github.com/huggingface/datasets/pull/7396
2,851,716,755
PR_kwDODunzps6LJBmT
7,396
Update README.md
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-13T17:44:36Z
2025-02-13T17:46:57Z
2025-02-13T17:44:51Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7396/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7396/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7396.diff", "html_url": "https://github.com/huggingface/datasets/pull/7396", "merged_at": "2025-02-13T17:44:51Z", "patch_url": "https://github.com/huggingface/datasets/pull/7396.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7396" }
https://api.github.com/repos/huggingface/datasets/issues/7395
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7395/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7395/comments
https://api.github.com/repos/huggingface/datasets/issues/7395/events
https://github.com/huggingface/datasets/pull/7395
2,851,575,160
PR_kwDODunzps6LIivQ
7,395
Update docs
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-13T16:43:15Z
2025-02-13T17:20:32Z
2025-02-13T17:20:30Z
MEMBER
null
null
null
- update min python version - replace canonical dataset names with new names - avoid examples with trust_remote_code
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7395/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7395/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7395.diff", "html_url": "https://github.com/huggingface/datasets/pull/7395", "merged_at": "2025-02-13T17:20:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/7395.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7395" }
https://api.github.com/repos/huggingface/datasets/issues/7394
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7394/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7394/comments
https://api.github.com/repos/huggingface/datasets/issues/7394/events
https://github.com/huggingface/datasets/issues/7394
2,847,172,115
I_kwDODunzps6ptGYT
7,394
Using load_dataset with data_files and split arguments yields an error
{ "avatar_url": "https://avatars.githubusercontent.com/u/61103399?v=4", "events_url": "https://api.github.com/users/devon-research/events{/privacy}", "followers_url": "https://api.github.com/users/devon-research/followers", "following_url": "https://api.github.com/users/devon-research/following{/other_user}", "gists_url": "https://api.github.com/users/devon-research/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/devon-research", "id": 61103399, "login": "devon-research", "node_id": "MDQ6VXNlcjYxMTAzMzk5", "organizations_url": "https://api.github.com/users/devon-research/orgs", "received_events_url": "https://api.github.com/users/devon-research/received_events", "repos_url": "https://api.github.com/users/devon-research/repos", "site_admin": false, "starred_url": "https://api.github.com/users/devon-research/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/devon-research/subscriptions", "type": "User", "url": "https://api.github.com/users/devon-research", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-02-12T04:50:11Z
2025-02-12T04:50:11Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug It seems the list of valid splits recorded by the package becomes incorrectly overwritten when using the `data_files` argument. If I run ```python from datasets import load_dataset load_dataset("allenai/super", split="all_examples", data_files="tasks/expert.jsonl") ``` then I get the error ``` ValueError: Unknown split "all_examples". Should be one of ['train']. ``` However, if I run ```python from datasets import load_dataset load_dataset("allenai/super", split="train", name="Expert") ``` then I get ``` ValueError: Unknown split "train". Should be one of ['all_examples']. ``` ### Steps to reproduce the bug Run ```python from datasets import load_dataset load_dataset("allenai/super", split="all_examples", data_files="tasks/expert.jsonl") ``` ### Expected behavior No error. ### Environment info Python = 3.12 datasets = 3.2.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7394/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7394/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7393
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7393/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7393/comments
https://api.github.com/repos/huggingface/datasets/issues/7393/events
https://github.com/huggingface/datasets/pull/7393
2,846,446,674
PR_kwDODunzps6K3DiZ
7,393
Optimized sequence encoding for scalars
{ "avatar_url": "https://avatars.githubusercontent.com/u/38319063?v=4", "events_url": "https://api.github.com/users/lukasgd/events{/privacy}", "followers_url": "https://api.github.com/users/lukasgd/followers", "following_url": "https://api.github.com/users/lukasgd/following{/other_user}", "gists_url": "https://api.github.com/users/lukasgd/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lukasgd", "id": 38319063, "login": "lukasgd", "node_id": "MDQ6VXNlcjM4MzE5MDYz", "organizations_url": "https://api.github.com/users/lukasgd/orgs", "received_events_url": "https://api.github.com/users/lukasgd/received_events", "repos_url": "https://api.github.com/users/lukasgd/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lukasgd/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lukasgd/subscriptions", "type": "User", "url": "https://api.github.com/users/lukasgd", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-02-11T20:30:44Z
2025-02-13T17:11:33Z
2025-02-13T17:11:32Z
CONTRIBUTOR
null
null
null
The change in https://github.com/huggingface/datasets/pull/3197 introduced redundant list-comprehensions when `obj` is a long sequence of scalars. This becomes a noticeable overhead when loading data from an `IterableDataset` in the function `_apply_feature_types_on_example` and can be eliminated by adding a check for scalars in `encode_nested_example` proposed here. In the following code example ``` import time from datasets.features import Sequence, Value from datasets.features.features import encode_nested_example schema = Sequence(Value("int32")) obj = list(range(100000)) start = time.perf_counter() result = encode_nested_example(schema, obj) stop = time.perf_counter() print(f"Time spent is {stop-start} sec") ``` `encode_nested_example` becomes 492x faster (from 0.0769 to 0.0002 sec), respectively 322x (from 0.00814 to 0.00003 sec) for a list of length 10000, on a GH200 system, making it unnoticeable when loading data with tokenization. Another change is made to avoid creating arrays from scalars and afterwards re-extracting them during casting to python (`obj == obj.__array__()[()]` in that case), which avoids a regression in the array write benchmarks.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7393/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7393/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7393.diff", "html_url": "https://github.com/huggingface/datasets/pull/7393", "merged_at": "2025-02-13T17:11:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/7393.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7393" }
https://api.github.com/repos/huggingface/datasets/issues/7392
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7392/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7392/comments
https://api.github.com/repos/huggingface/datasets/issues/7392/events
https://github.com/huggingface/datasets/issues/7392
2,846,095,043
I_kwDODunzps6po_bD
7,392
push_to_hub payload too large error when using large ClassLabel feature
{ "avatar_url": "https://avatars.githubusercontent.com/u/35470740?v=4", "events_url": "https://api.github.com/users/DavidRConnell/events{/privacy}", "followers_url": "https://api.github.com/users/DavidRConnell/followers", "following_url": "https://api.github.com/users/DavidRConnell/following{/other_user}", "gists_url": "https://api.github.com/users/DavidRConnell/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/DavidRConnell", "id": 35470740, "login": "DavidRConnell", "node_id": "MDQ6VXNlcjM1NDcwNzQw", "organizations_url": "https://api.github.com/users/DavidRConnell/orgs", "received_events_url": "https://api.github.com/users/DavidRConnell/received_events", "repos_url": "https://api.github.com/users/DavidRConnell/repos", "site_admin": false, "starred_url": "https://api.github.com/users/DavidRConnell/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DavidRConnell/subscriptions", "type": "User", "url": "https://api.github.com/users/DavidRConnell", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-02-11T17:51:34Z
2025-02-11T18:01:31Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug When using `datasets.DatasetDict.push_to_hub` an `HfHubHTTPError: 413 Client Error: Payload Too Large for url` is raised if the dataset contains a large `ClassLabel` feature. Even if the total size of the dataset is small. ### Steps to reproduce the bug ``` python import random import sys import datasets random.seed(42) def random_str(sz): return "".join(chr(random.randint(ord("a"), ord("z"))) for _ in range(sz)) data = datasets.DatasetDict( { str(i): datasets.Dataset.from_dict( { "label": [list(range(3)) for _ in range(10)], "abstract": [random_str(10_000) for _ in range(10)], }, ) for i in range(3) } ) features = data["1"].features.copy() features["label"] = datasets.Sequence( datasets.ClassLabel(names=[str(i) for i in range(50_000)]) ) data = data.map(lambda examples: {}, features=features) feat_size = sys.getsizeof(data["1"].features["label"].feature.names) print(f"Size of ClassLabel names: {feat_size}") # Size of ClassLabel names: 444376 data.push_to_hub("dconnell/pubtator3_test") ``` Note that this succeeds if `ClassLabel` has fewer names or if `ClassLabel` is replaced with `Value("int64")` ### Expected behavior Should push the dataset to hub. ### Environment info Copy-and-paste the text below in your GitHub issue. - `datasets` version: 3.2.0 - Platform: Linux-5.15.0-126-generic-x86_64-with-glibc2.35 - Python version: 3.12.8 - `huggingface_hub` version: 0.28.1 - PyArrow version: 19.0.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.9.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7392/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7392/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7391
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7391/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7391/comments
https://api.github.com/repos/huggingface/datasets/issues/7391/events
https://github.com/huggingface/datasets/issues/7391
2,845,184,764
I_kwDODunzps6plhL8
7,391
AttributeError: module 'pyarrow.lib' has no attribute 'ListViewType'
{ "avatar_url": "https://avatars.githubusercontent.com/u/25193686?v=4", "events_url": "https://api.github.com/users/LinXin04/events{/privacy}", "followers_url": "https://api.github.com/users/LinXin04/followers", "following_url": "https://api.github.com/users/LinXin04/following{/other_user}", "gists_url": "https://api.github.com/users/LinXin04/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/LinXin04", "id": 25193686, "login": "LinXin04", "node_id": "MDQ6VXNlcjI1MTkzNjg2", "organizations_url": "https://api.github.com/users/LinXin04/orgs", "received_events_url": "https://api.github.com/users/LinXin04/received_events", "repos_url": "https://api.github.com/users/LinXin04/repos", "site_admin": false, "starred_url": "https://api.github.com/users/LinXin04/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LinXin04/subscriptions", "type": "User", "url": "https://api.github.com/users/LinXin04", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-02-11T12:02:26Z
2025-02-11T12:02:26Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
I tried several versions of pyarrow, but none of them worked.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7391/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7391/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7390
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7390/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7390/comments
https://api.github.com/repos/huggingface/datasets/issues/7390/events
https://github.com/huggingface/datasets/issues/7390
2,843,813,365
I_kwDODunzps6pgSX1
7,390
Re-add py.typed
{ "avatar_url": "https://avatars.githubusercontent.com/u/730137?v=4", "events_url": "https://api.github.com/users/NeilGirdhar/events{/privacy}", "followers_url": "https://api.github.com/users/NeilGirdhar/followers", "following_url": "https://api.github.com/users/NeilGirdhar/following{/other_user}", "gists_url": "https://api.github.com/users/NeilGirdhar/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/NeilGirdhar", "id": 730137, "login": "NeilGirdhar", "node_id": "MDQ6VXNlcjczMDEzNw==", "organizations_url": "https://api.github.com/users/NeilGirdhar/orgs", "received_events_url": "https://api.github.com/users/NeilGirdhar/received_events", "repos_url": "https://api.github.com/users/NeilGirdhar/repos", "site_admin": false, "starred_url": "https://api.github.com/users/NeilGirdhar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NeilGirdhar/subscriptions", "type": "User", "url": "https://api.github.com/users/NeilGirdhar", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
0
2025-02-10T22:12:52Z
2025-02-10T22:12:52Z
null
CONTRIBUTOR
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request The motivation for removing py.typed no longer seems to apply. Would a solution like [this one](https://github.com/huggingface/huggingface_hub/pull/2752) work here? ### Motivation MyPy support is broken. As more type checkers come out, such as RedKnot, these may also be broken. It would be good to be PEP 561 compliant as long as it's not too onerous. ### Your contribution I can re-add py.typed, but I don't know how to make sure all of the `__all__` files are provided (although you may not need to with modern PyRight).
null
{ "+1": 8, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 8, "url": "https://api.github.com/repos/huggingface/datasets/issues/7390/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7390/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7389
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7389/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7389/comments
https://api.github.com/repos/huggingface/datasets/issues/7389/events
https://github.com/huggingface/datasets/issues/7389
2,843,592,606
I_kwDODunzps6pfcee
7,389
Getting statistics about filtered examples
{ "avatar_url": "https://avatars.githubusercontent.com/u/511073?v=4", "events_url": "https://api.github.com/users/jonathanasdf/events{/privacy}", "followers_url": "https://api.github.com/users/jonathanasdf/followers", "following_url": "https://api.github.com/users/jonathanasdf/following{/other_user}", "gists_url": "https://api.github.com/users/jonathanasdf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jonathanasdf", "id": 511073, "login": "jonathanasdf", "node_id": "MDQ6VXNlcjUxMTA3Mw==", "organizations_url": "https://api.github.com/users/jonathanasdf/orgs", "received_events_url": "https://api.github.com/users/jonathanasdf/received_events", "repos_url": "https://api.github.com/users/jonathanasdf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jonathanasdf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jonathanasdf/subscriptions", "type": "User", "url": "https://api.github.com/users/jonathanasdf", "user_view_type": "public" }
[]
closed
false
null
[]
null
2
2025-02-10T20:48:29Z
2025-02-11T20:44:15Z
2025-02-11T20:44:13Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
@lhoestq wondering if the team has thought about this and if there are any recommendations? Currently when processing datasets some examples are bound to get filtered out, whether it's due to bad format, or length is too long, or any other custom filters that might be getting applied. Let's just focus on the filter by length for now, since that would be something that gets applied dynamically for each training run. Say we want to show a graph in W&B with the running total of the number of filtered examples so far. What would be a good way to go about hooking this up? Because the map/filter operations happen before the DataLoader batches are created, at training time if we're just grabbing batches from the DataLoader then we won't know how many things have been filtered already. But there's not really a good way to include a 'num_filtered' key into the dataset itself either because dataset map/filter process examples independently and don't have a way to track a running sum. The only approach I can kind of think of is having a 'is_filtered' key in the dataset, and then creating a custom batcher/collator that reads that and tracks the metric?
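A rough sketch of that last idea, for concreteness (all names and the length-based criterion below are illustrative, not an existing `datasets` feature):

```python
from torch.utils.data import DataLoader

num_filtered = 0  # running total, e.g. logged to W&B at each step

def mark_filtered(example, max_len=512):
    # instead of dropping the example in .filter(), keep it and flag it
    example["is_filtered"] = len(example["input_ids"]) > max_len
    return example

def counting_collate(batch):
    global num_filtered
    kept = [ex for ex in batch if not ex["is_filtered"]]
    num_filtered += len(batch) - len(kept)
    return kept  # a real collator would build tensors from `kept` here

# ds = ds.map(mark_filtered)
# loader = DataLoader(ds, batch_size=32, collate_fn=counting_collate)
```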
{ "avatar_url": "https://avatars.githubusercontent.com/u/511073?v=4", "events_url": "https://api.github.com/users/jonathanasdf/events{/privacy}", "followers_url": "https://api.github.com/users/jonathanasdf/followers", "following_url": "https://api.github.com/users/jonathanasdf/following{/other_user}", "gists_url": "https://api.github.com/users/jonathanasdf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jonathanasdf", "id": 511073, "login": "jonathanasdf", "node_id": "MDQ6VXNlcjUxMTA3Mw==", "organizations_url": "https://api.github.com/users/jonathanasdf/orgs", "received_events_url": "https://api.github.com/users/jonathanasdf/received_events", "repos_url": "https://api.github.com/users/jonathanasdf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jonathanasdf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jonathanasdf/subscriptions", "type": "User", "url": "https://api.github.com/users/jonathanasdf", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7389/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7389/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7388
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7388/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7388/comments
https://api.github.com/repos/huggingface/datasets/issues/7388/events
https://github.com/huggingface/datasets/issues/7388
2,843,188,499
I_kwDODunzps6pd50T
7,388
OSError: [Errno 22] Invalid argument forbidden character
{ "avatar_url": "https://avatars.githubusercontent.com/u/124634542?v=4", "events_url": "https://api.github.com/users/langflogit/events{/privacy}", "followers_url": "https://api.github.com/users/langflogit/followers", "following_url": "https://api.github.com/users/langflogit/following{/other_user}", "gists_url": "https://api.github.com/users/langflogit/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/langflogit", "id": 124634542, "login": "langflogit", "node_id": "U_kgDOB23Frg", "organizations_url": "https://api.github.com/users/langflogit/orgs", "received_events_url": "https://api.github.com/users/langflogit/received_events", "repos_url": "https://api.github.com/users/langflogit/repos", "site_admin": false, "starred_url": "https://api.github.com/users/langflogit/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/langflogit/subscriptions", "type": "User", "url": "https://api.github.com/users/langflogit", "user_view_type": "public" }
[]
closed
false
null
[]
null
2
2025-02-10T17:46:31Z
2025-02-11T13:42:32Z
2025-02-11T13:42:30Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I'm on Windows and I'm trying to load a dataset, but I'm getting the error in the title because files in the repository are named with characters like < and >, which are not allowed in file names on Windows. Would it be possible to load this dataset while removing those characters? ### Steps to reproduce the bug load_dataset("CATMuS/medieval") on Windows ### Expected behavior The loading function should strip the forbidden characters so that datasets containing them can still be loaded. ### Environment info - `datasets` version: 3.2.0 - Platform: Windows-10-10.0.19045-SP0 - Python version: 3.12.2 - `huggingface_hub` version: 0.28.1 - PyArrow version: 19.0.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.9.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/124634542?v=4", "events_url": "https://api.github.com/users/langflogit/events{/privacy}", "followers_url": "https://api.github.com/users/langflogit/followers", "following_url": "https://api.github.com/users/langflogit/following{/other_user}", "gists_url": "https://api.github.com/users/langflogit/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/langflogit", "id": 124634542, "login": "langflogit", "node_id": "U_kgDOB23Frg", "organizations_url": "https://api.github.com/users/langflogit/orgs", "received_events_url": "https://api.github.com/users/langflogit/received_events", "repos_url": "https://api.github.com/users/langflogit/repos", "site_admin": false, "starred_url": "https://api.github.com/users/langflogit/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/langflogit/subscriptions", "type": "User", "url": "https://api.github.com/users/langflogit", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7388/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7388/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7387
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7387/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7387/comments
https://api.github.com/repos/huggingface/datasets/issues/7387/events
https://github.com/huggingface/datasets/issues/7387
2,841,228,048
I_kwDODunzps6pWbMQ
7,387
Dynamic adjusting dataloader sampling weight
{ "avatar_url": "https://avatars.githubusercontent.com/u/72799643?v=4", "events_url": "https://api.github.com/users/whc688/events{/privacy}", "followers_url": "https://api.github.com/users/whc688/followers", "following_url": "https://api.github.com/users/whc688/following{/other_user}", "gists_url": "https://api.github.com/users/whc688/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/whc688", "id": 72799643, "login": "whc688", "node_id": "MDQ6VXNlcjcyNzk5NjQz", "organizations_url": "https://api.github.com/users/whc688/orgs", "received_events_url": "https://api.github.com/users/whc688/received_events", "repos_url": "https://api.github.com/users/whc688/repos", "site_admin": false, "starred_url": "https://api.github.com/users/whc688/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/whc688/subscriptions", "type": "User", "url": "https://api.github.com/users/whc688", "user_view_type": "public" }
[]
open
false
null
[]
null
3
2025-02-10T03:18:47Z
2025-03-07T14:06:54Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
Hi, thanks for your wonderful work! I'm wondering whether there is a way to dynamically adjust the sampling weight of each example in the dataset during training. Looking forward to your reply, thanks again.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7387/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7387/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7386
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7386/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7386/comments
https://api.github.com/repos/huggingface/datasets/issues/7386/events
https://github.com/huggingface/datasets/issues/7386
2,840,032,524
I_kwDODunzps6pR3UM
7,386
Add bookfolder Dataset Builder for Digital Book Formats
{ "avatar_url": "https://avatars.githubusercontent.com/u/22115108?v=4", "events_url": "https://api.github.com/users/shikanime/events{/privacy}", "followers_url": "https://api.github.com/users/shikanime/followers", "following_url": "https://api.github.com/users/shikanime/following{/other_user}", "gists_url": "https://api.github.com/users/shikanime/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/shikanime", "id": 22115108, "login": "shikanime", "node_id": "MDQ6VXNlcjIyMTE1MTA4", "organizations_url": "https://api.github.com/users/shikanime/orgs", "received_events_url": "https://api.github.com/users/shikanime/received_events", "repos_url": "https://api.github.com/users/shikanime/repos", "site_admin": false, "starred_url": "https://api.github.com/users/shikanime/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shikanime/subscriptions", "type": "User", "url": "https://api.github.com/users/shikanime", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
null
1
2025-02-08T14:27:55Z
2025-02-08T14:30:10Z
2025-02-08T14:30:09Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request This feature proposes adding a new dataset builder called bookfolder to the datasets library. This builder would allow users to easily load datasets consisting of various digital book formats, including: AZW, AZW3, CB7, CBR, CBT, CBZ, EPUB, MOBI, and PDF. ### Motivation Currently, loading datasets of these digital book files requires manual effort. This would also lower the barrier to entry for working with these formats, enabling more diverse and interesting datasets to be used within the Hugging Face ecosystem. ### Your contribution This feature is rather simple as it will be based on the folder-based builder, similar to imagefolder. I'm willing to contribute to this feature by submitting a PR
{ "avatar_url": "https://avatars.githubusercontent.com/u/22115108?v=4", "events_url": "https://api.github.com/users/shikanime/events{/privacy}", "followers_url": "https://api.github.com/users/shikanime/followers", "following_url": "https://api.github.com/users/shikanime/following{/other_user}", "gists_url": "https://api.github.com/users/shikanime/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/shikanime", "id": 22115108, "login": "shikanime", "node_id": "MDQ6VXNlcjIyMTE1MTA4", "organizations_url": "https://api.github.com/users/shikanime/orgs", "received_events_url": "https://api.github.com/users/shikanime/received_events", "repos_url": "https://api.github.com/users/shikanime/repos", "site_admin": false, "starred_url": "https://api.github.com/users/shikanime/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shikanime/subscriptions", "type": "User", "url": "https://api.github.com/users/shikanime", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7386/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7386/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7385
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7385/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7385/comments
https://api.github.com/repos/huggingface/datasets/issues/7385/events
https://github.com/huggingface/datasets/pull/7385
2,830,664,522
PR_kwDODunzps6KBO6i
7,385
Make IterableDataset (optionally) resumable
{ "avatar_url": "https://avatars.githubusercontent.com/u/18402347?v=4", "events_url": "https://api.github.com/users/yzhangcs/events{/privacy}", "followers_url": "https://api.github.com/users/yzhangcs/followers", "following_url": "https://api.github.com/users/yzhangcs/following{/other_user}", "gists_url": "https://api.github.com/users/yzhangcs/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yzhangcs", "id": 18402347, "login": "yzhangcs", "node_id": "MDQ6VXNlcjE4NDAyMzQ3", "organizations_url": "https://api.github.com/users/yzhangcs/orgs", "received_events_url": "https://api.github.com/users/yzhangcs/received_events", "repos_url": "https://api.github.com/users/yzhangcs/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yzhangcs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yzhangcs/subscriptions", "type": "User", "url": "https://api.github.com/users/yzhangcs", "user_view_type": "public" }
[]
open
false
null
[]
null
2
2025-02-04T15:55:33Z
2025-03-03T17:31:40Z
null
CONTRIBUTOR
null
null
null
### What does this PR do? This PR introduces a new `stateful` option to the `dataset.shuffle` method, which defaults to `False`. When enabled, this option allows for resumable shuffling of `IterableDataset` instances, albeit with some additional memory overhead. Key points: * All tests have passed * Docstrings have been updated to reflect the new functionality I'm very much looking forward to receiving feedback on this implementation! @lhoestq
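A hedged sketch of how the proposed option might be used (the `stateful` argument is what this PR adds and is not part of a released version; `state_dict`/`load_state_dict` are the existing `IterableDataset` checkpointing hooks):

```python
from datasets import IterableDataset

def gen():
    for i in range(10_000):
        yield {"x": i}

ds = IterableDataset.from_generator(gen)
ds = ds.shuffle(seed=42, buffer_size=1000, stateful=True)  # `stateful=True` is the new option

for idx, example in enumerate(ds):
    if idx == 99:
        state = ds.state_dict()  # checkpoint mid-stream, including the shuffle buffer
        break

ds.load_state_dict(state)  # resume iteration from the checkpoint
```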
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7385/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7385/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7385.diff", "html_url": "https://github.com/huggingface/datasets/pull/7385", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7385.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7385" }
https://api.github.com/repos/huggingface/datasets/issues/7384
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7384/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7384/comments
https://api.github.com/repos/huggingface/datasets/issues/7384/events
https://github.com/huggingface/datasets/pull/7384
2,828,208,828
PR_kwDODunzps6J4wVi
7,384
Support async functions in map()
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
2
2025-02-03T18:18:40Z
2025-02-13T14:01:13Z
2025-02-13T14:00:06Z
MEMBER
null
null
null
e.g. to download images or call an inference API like HF or vLLM ```python import asyncio import random from datasets import Dataset async def f(x): await asyncio.sleep(random.random()) ds = Dataset.from_dict({"data": range(100)}) ds.map(f) # Map: 100%|█████████████████████████████| 100/100 [00:01<00:00, 99.81 examples/s] ``` TODO - [x] clean code (right now it's a big copy paste) - [x] batched - [x] Dataset.map() - [x] IterableDataset.map() - [x] Dataset.filter() - [x] IterableDataset.filter() - [x] test - [x] docs
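For illustration, a hedged sketch of an async UDF that fills a column by calling an HTTP endpoint (the URL and response format are placeholders, and `aiohttp` is just one possible async client):

```python
import aiohttp
from datasets import Dataset

async def classify(example):
    async with aiohttp.ClientSession() as session:
        async with session.post(
            "https://example.com/classify",  # placeholder endpoint
            json={"text": example["text"]},
        ) as resp:
            example["label"] = (await resp.json())["label"]
    return example

ds = Dataset.from_dict({"text": ["good movie", "bad movie"]})
ds = ds.map(classify)  # map() awaits the coroutine for each example
```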
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 3, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/7384/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7384/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7384.diff", "html_url": "https://github.com/huggingface/datasets/pull/7384", "merged_at": "2025-02-13T14:00:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/7384.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7384" }
https://api.github.com/repos/huggingface/datasets/issues/7382
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7382/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7382/comments
https://api.github.com/repos/huggingface/datasets/issues/7382/events
https://github.com/huggingface/datasets/pull/7382
2,823,480,924
PR_kwDODunzps6Jo69f
7,382
Add Pandas, PyArrow and Polars docs
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-01-31T13:22:59Z
2025-01-31T16:30:59Z
2025-01-31T16:30:57Z
MEMBER
null
null
null
(also added the missing numpy docs and fixed a small bug in pyarrow formatting)
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7382/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7382/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7382.diff", "html_url": "https://github.com/huggingface/datasets/pull/7382", "merged_at": "2025-01-31T16:30:57Z", "patch_url": "https://github.com/huggingface/datasets/pull/7382.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7382" }
https://api.github.com/repos/huggingface/datasets/issues/7381
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7381/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7381/comments
https://api.github.com/repos/huggingface/datasets/issues/7381/events
https://github.com/huggingface/datasets/issues/7381
2,815,649,092
I_kwDODunzps6n02VE
7,381
Iterating over values of a column in the IterableDataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/47208659?v=4", "events_url": "https://api.github.com/users/TopCoder2K/events{/privacy}", "followers_url": "https://api.github.com/users/TopCoder2K/followers", "following_url": "https://api.github.com/users/TopCoder2K/following{/other_user}", "gists_url": "https://api.github.com/users/TopCoder2K/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/TopCoder2K", "id": 47208659, "login": "TopCoder2K", "node_id": "MDQ6VXNlcjQ3MjA4NjU5", "organizations_url": "https://api.github.com/users/TopCoder2K/orgs", "received_events_url": "https://api.github.com/users/TopCoder2K/received_events", "repos_url": "https://api.github.com/users/TopCoder2K/repos", "site_admin": false, "starred_url": "https://api.github.com/users/TopCoder2K/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TopCoder2K/subscriptions", "type": "User", "url": "https://api.github.com/users/TopCoder2K", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/47208659?v=4", "events_url": "https://api.github.com/users/TopCoder2K/events{/privacy}", "followers_url": "https://api.github.com/users/TopCoder2K/followers", "following_url": "https://api.github.com/users/TopCoder2K/following{/other_user}", "gists_url": "https://api.github.com/users/TopCoder2K/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/TopCoder2K", "id": 47208659, "login": "TopCoder2K", "node_id": "MDQ6VXNlcjQ3MjA4NjU5", "organizations_url": "https://api.github.com/users/TopCoder2K/orgs", "received_events_url": "https://api.github.com/users/TopCoder2K/received_events", "repos_url": "https://api.github.com/users/TopCoder2K/repos", "site_admin": false, "starred_url": "https://api.github.com/users/TopCoder2K/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TopCoder2K/subscriptions", "type": "User", "url": "https://api.github.com/users/TopCoder2K", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/47208659?v=4", "events_url": "https://api.github.com/users/TopCoder2K/events{/privacy}", "followers_url": "https://api.github.com/users/TopCoder2K/followers", "following_url": "https://api.github.com/users/TopCoder2K/following{/other_user}", "gists_url": "https://api.github.com/users/TopCoder2K/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/TopCoder2K", "id": 47208659, "login": "TopCoder2K", "node_id": "MDQ6VXNlcjQ3MjA4NjU5", "organizations_url": "https://api.github.com/users/TopCoder2K/orgs", "received_events_url": "https://api.github.com/users/TopCoder2K/received_events", "repos_url": "https://api.github.com/users/TopCoder2K/repos", "site_admin": false, "starred_url": "https://api.github.com/users/TopCoder2K/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TopCoder2K/subscriptions", "type": "User", "url": "https://api.github.com/users/TopCoder2K", "user_view_type": "public" } ]
null
11
2025-01-28T13:17:36Z
2025-05-22T18:00:04Z
2025-05-22T18:00:04Z
CONTRIBUTOR
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request I would like to be able to iterate (and re-iterate if needed) over a column of an `IterableDataset` instance. The following example shows the proposed API: ```python def gen(): yield {"text": "Good", "label": 0} yield {"text": "Bad", "label": 1} ds = IterableDataset.from_generator(gen) texts = ds["text"] for v in texts: print(v) # Prints "Good" and "Bad" for v in texts: print(v) # Prints "Good" and "Bad" again ``` ### Motivation In real-world problems, huge NNs like Transformers are not always the best option, so there is a need to conduct experiments with different methods. While 🤗Datasets is perfectly adapted to 🤗Transformers, it may be inconvenient when used with other libraries. Retrieving a particular column is one such case (e.g., gensim's FastText [requires](https://radimrehurek.com/gensim/models/fasttext.html#gensim.models.fasttext.FastText.train) lists of strings, not dictionaries). While there are ways to achieve the desired functionality, they are not good ([forum](https://discuss.huggingface.co/t/how-to-iterate-over-values-of-a-column-in-the-iterabledataset/135649)). It would be great if there was a built-in solution. ### Your contribution Theoretically, I can submit a PR, but I have very little knowledge of the internal structure of 🤗Datasets, so some help may be needed. Moreover, I can only work on weekends, since I have a full-time job. However, the feature does not seem to be popular, so there is no need to implement it as fast as possible.
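For contrast with the proposed `ds["text"]` syntax, a hedged sketch of workarounds that already exist today (re-using the generator from the example above):

```python
from datasets import IterableDataset

def gen():
    yield {"text": "Good", "label": 0}
    yield {"text": "Bad", "label": 1}

ds = IterableDataset.from_generator(gen)

# plain generator expression: works, but is exhausted after one pass
texts = (ex["text"] for ex in ds)

# keep only the column of interest and re-iterate over the dataset as needed
texts_ds = ds.select_columns("text")
for ex in texts_ds:
    print(ex["text"])  # prints "Good" and "Bad"
for ex in texts_ds:
    print(ex["text"])  # prints "Good" and "Bad" again
```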
{ "avatar_url": "https://avatars.githubusercontent.com/u/47208659?v=4", "events_url": "https://api.github.com/users/TopCoder2K/events{/privacy}", "followers_url": "https://api.github.com/users/TopCoder2K/followers", "following_url": "https://api.github.com/users/TopCoder2K/following{/other_user}", "gists_url": "https://api.github.com/users/TopCoder2K/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/TopCoder2K", "id": 47208659, "login": "TopCoder2K", "node_id": "MDQ6VXNlcjQ3MjA4NjU5", "organizations_url": "https://api.github.com/users/TopCoder2K/orgs", "received_events_url": "https://api.github.com/users/TopCoder2K/received_events", "repos_url": "https://api.github.com/users/TopCoder2K/repos", "site_admin": false, "starred_url": "https://api.github.com/users/TopCoder2K/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TopCoder2K/subscriptions", "type": "User", "url": "https://api.github.com/users/TopCoder2K", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7381/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7381/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7380
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7380/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7380/comments
https://api.github.com/repos/huggingface/datasets/issues/7380/events
https://github.com/huggingface/datasets/pull/7380
2,811,566,116
PR_kwDODunzps6JAkj5
7,380
fix: dill default for versions greater than 0.3.8
{ "avatar_url": "https://avatars.githubusercontent.com/u/40773225?v=4", "events_url": "https://api.github.com/users/sam-hey/events{/privacy}", "followers_url": "https://api.github.com/users/sam-hey/followers", "following_url": "https://api.github.com/users/sam-hey/following{/other_user}", "gists_url": "https://api.github.com/users/sam-hey/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sam-hey", "id": 40773225, "login": "sam-hey", "node_id": "MDQ6VXNlcjQwNzczMjI1", "organizations_url": "https://api.github.com/users/sam-hey/orgs", "received_events_url": "https://api.github.com/users/sam-hey/received_events", "repos_url": "https://api.github.com/users/sam-hey/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sam-hey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sam-hey/subscriptions", "type": "User", "url": "https://api.github.com/users/sam-hey", "user_view_type": "public" }
[]
closed
false
null
[]
null
1
2025-01-26T13:37:16Z
2025-03-13T20:40:19Z
2025-03-13T20:40:19Z
NONE
null
null
null
Fixes `def log` for dill versions >= 0.3.9 (https://pypi.org/project/dill/). The datasets lib uses dill, and the release of dill 0.3.9 breaks it.
{ "avatar_url": "https://avatars.githubusercontent.com/u/40773225?v=4", "events_url": "https://api.github.com/users/sam-hey/events{/privacy}", "followers_url": "https://api.github.com/users/sam-hey/followers", "following_url": "https://api.github.com/users/sam-hey/following{/other_user}", "gists_url": "https://api.github.com/users/sam-hey/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sam-hey", "id": 40773225, "login": "sam-hey", "node_id": "MDQ6VXNlcjQwNzczMjI1", "organizations_url": "https://api.github.com/users/sam-hey/orgs", "received_events_url": "https://api.github.com/users/sam-hey/received_events", "repos_url": "https://api.github.com/users/sam-hey/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sam-hey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sam-hey/subscriptions", "type": "User", "url": "https://api.github.com/users/sam-hey", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7380/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7380/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7380.diff", "html_url": "https://github.com/huggingface/datasets/pull/7380", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7380.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7380" }
https://api.github.com/repos/huggingface/datasets/issues/7378
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7378/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7378/comments
https://api.github.com/repos/huggingface/datasets/issues/7378/events
https://github.com/huggingface/datasets/issues/7378
2,802,957,388
I_kwDODunzps6nEbxM
7,378
Allow pushing config version to hub
{ "avatar_url": "https://avatars.githubusercontent.com/u/129072?v=4", "events_url": "https://api.github.com/users/momeara/events{/privacy}", "followers_url": "https://api.github.com/users/momeara/followers", "following_url": "https://api.github.com/users/momeara/following{/other_user}", "gists_url": "https://api.github.com/users/momeara/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/momeara", "id": 129072, "login": "momeara", "node_id": "MDQ6VXNlcjEyOTA3Mg==", "organizations_url": "https://api.github.com/users/momeara/orgs", "received_events_url": "https://api.github.com/users/momeara/received_events", "repos_url": "https://api.github.com/users/momeara/repos", "site_admin": false, "starred_url": "https://api.github.com/users/momeara/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/momeara/subscriptions", "type": "User", "url": "https://api.github.com/users/momeara", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
1
2025-01-21T22:35:07Z
2025-01-30T13:56:56Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request Currently, when datasets are created, they can be versioned by passing the `version` argument to `load_dataset(...)`. For example, creating `outcomes.csv` on the command line ``` echo "id,value\n1,0\n2,0\n3,1\n4,1\n" > outcomes.csv ``` and then loading it ``` import datasets dataset = datasets.load_dataset( "csv", data_files ="outcomes.csv", keep_in_memory = True, version = '1.0.0') ``` The version info is stored in the `info` and can be accessed e.g. by `next(iter(dataset.values())).info.version` This dataset can be uploaded to the hub with `dataset.push_to_hub(repo_id = "maomlab/example_dataset")`. This will create a dataset on the hub with the following in the `README.md`, but it doesn't upload the version information: ``` --- dataset_info: features: - name: id dtype: int64 - name: value dtype: int64 splits: - name: train num_bytes: 64 num_examples: 4 download_size: 1332 dataset_size: 64 configs: - config_name: default data_files: - split: train path: data/train-* --- ``` However, when I download from the hub, the version information is missing: ``` dataset_from_hub_no_version = datasets.load_dataset("maomlab/example_dataset") next(iter(dataset_from_hub_no_version.values())).info.version ``` I can add the version information manually on the hub, by appending it to the end of the config section: ``` ... configs: - config_name: default data_files: - split: train path: data/train-* version: 1.0.0 --- ``` And then when I download it, the version information is correct. ### Motivation ### Why adding version information for each config makes sense 1. The version information is already recorded in the dataset config info data structure and `datasets` is already able to parse it correctly, so it makes sense to sync it with `push_to_hub`. 2. Keeping the version info at the config level is different from version info at the branch level, as the former relates to the version of the specific dataset the config refers to rather than the version of the dataset curation itself. ## An explanation for the current behavior: In [datasets/src/datasets/info.py:159](https://github.com/huggingface/datasets/blob/fb91fd3c9ea91a818681a777faf8d0c46f14c680/src/datasets/info.py#L159C1-L160C1 ), the `_INCLUDED_INFO_IN_YAML` variable doesn't include `"version"`. If my reading of the code is right, adding `"version"` to `_INCLUDED_INFO_IN_YAML` would allow the version information to be uploaded to the hub. ### Your contribution Request: add `"version"` to `_INCLUDED_INFO_IN_YAML` in [datasets/src/datasets/info.py:159](https://github.com/huggingface/datasets/blob/fb91fd3c9ea91a818681a777faf8d0c46f14c680/src/datasets/info.py#L159C1-L160C1 )
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7378/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7378/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7377
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7377/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7377/comments
https://api.github.com/repos/huggingface/datasets/issues/7377/events
https://github.com/huggingface/datasets/issues/7377
2,802,723,285
I_kwDODunzps6nDinV
7,377
Support for sparse arrays with the Arrow Sparse Tensor format?
{ "avatar_url": "https://avatars.githubusercontent.com/u/3231217?v=4", "events_url": "https://api.github.com/users/JulesGM/events{/privacy}", "followers_url": "https://api.github.com/users/JulesGM/followers", "following_url": "https://api.github.com/users/JulesGM/following{/other_user}", "gists_url": "https://api.github.com/users/JulesGM/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JulesGM", "id": 3231217, "login": "JulesGM", "node_id": "MDQ6VXNlcjMyMzEyMTc=", "organizations_url": "https://api.github.com/users/JulesGM/orgs", "received_events_url": "https://api.github.com/users/JulesGM/received_events", "repos_url": "https://api.github.com/users/JulesGM/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JulesGM/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JulesGM/subscriptions", "type": "User", "url": "https://api.github.com/users/JulesGM", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
1
2025-01-21T20:14:35Z
2025-01-30T14:06:45Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request AI in biology is becoming a big thing. One thing that would be a huge benefit to the field that Huggingface Datasets doesn't currently have is native support for **sparse arrays**. Arrow has support for sparse tensors. https://arrow.apache.org/docs/format/Other.html#sparse-tensor It would be a big deal if Hugging Face Datasets supported sparse tensors as a feature type, natively. ### Motivation This is important for example in the field of transcriptomics (modeling and understanding gene expression), because a large fraction of the genes are not expressed (zero). More generally, in science, sparse arrays are very common, so adding support for them would be very benefitial, it would make just using Hugging Face Dataset objects a lot more straightforward and clean. ### Your contribution We can discuss this further once the team comments of what they think about the feature, and if there were previous attempts at making it work, and understanding their evaluation of how hard it would be. My intuition is that it should be fairly straightforward, as the Arrow backend already supports it.
null
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 2, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 4, "url": "https://api.github.com/repos/huggingface/datasets/issues/7377/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7377/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7376
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7376/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7376/comments
https://api.github.com/repos/huggingface/datasets/issues/7376/events
https://github.com/huggingface/datasets/pull/7376
2,802,621,104
PR_kwDODunzps6IiO9j
7,376
[docs] uv install
{ "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stevhliu", "id": 59462357, "login": "stevhliu", "node_id": "MDQ6VXNlcjU5NDYyMzU3", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "repos_url": "https://api.github.com/users/stevhliu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "type": "User", "url": "https://api.github.com/users/stevhliu", "user_view_type": "public" }
[]
closed
false
null
[]
null
0
2025-01-21T19:15:48Z
2025-03-14T20:16:35Z
2025-03-14T20:16:35Z
MEMBER
null
null
null
Proposes adding uv to installation docs (see Slack thread [here](https://huggingface.slack.com/archives/C01N44FJDHT/p1737377177709279) for more context) if you're interested!
{ "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stevhliu", "id": 59462357, "login": "stevhliu", "node_id": "MDQ6VXNlcjU5NDYyMzU3", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "repos_url": "https://api.github.com/users/stevhliu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "type": "User", "url": "https://api.github.com/users/stevhliu", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7376/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7376/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7376.diff", "html_url": "https://github.com/huggingface/datasets/pull/7376", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7376.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7376" }
https://api.github.com/repos/huggingface/datasets/issues/7375
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7375/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7375/comments
https://api.github.com/repos/huggingface/datasets/issues/7375/events
https://github.com/huggingface/datasets/issues/7375
2,800,609,218
I_kwDODunzps6m7efC
7,375
Error during vllm batch inference
{ "avatar_url": "https://avatars.githubusercontent.com/u/51228154?v=4", "events_url": "https://api.github.com/users/YuShengzuishuai/events{/privacy}", "followers_url": "https://api.github.com/users/YuShengzuishuai/followers", "following_url": "https://api.github.com/users/YuShengzuishuai/following{/other_user}", "gists_url": "https://api.github.com/users/YuShengzuishuai/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/YuShengzuishuai", "id": 51228154, "login": "YuShengzuishuai", "node_id": "MDQ6VXNlcjUxMjI4MTU0", "organizations_url": "https://api.github.com/users/YuShengzuishuai/orgs", "received_events_url": "https://api.github.com/users/YuShengzuishuai/received_events", "repos_url": "https://api.github.com/users/YuShengzuishuai/repos", "site_admin": false, "starred_url": "https://api.github.com/users/YuShengzuishuai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/YuShengzuishuai/subscriptions", "type": "User", "url": "https://api.github.com/users/YuShengzuishuai", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-01-21T03:22:23Z
2025-01-30T14:02:40Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug ![Image](https://github.com/user-attachments/assets/3d958e43-28dc-4467-9333-5990c7af3b3f) ### Steps to reproduce the bug ![Image](https://github.com/user-attachments/assets/3067eeca-a54d-4956-b0fd-3fc5ea93dabb) ### Expected behavior ![Image](https://github.com/user-attachments/assets/77d32936-488f-4572-9365-bfb4170e555b) ### Environment info ![Image](https://github.com/user-attachments/assets/230335c4-825f-4db1-b07d-4776ef63ead8)
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7375/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7375/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7374
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7374/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7374/comments
https://api.github.com/repos/huggingface/datasets/issues/7374/events
https://github.com/huggingface/datasets/pull/7374
2,793,442,320
PR_kwDODunzps6IC66n
7,374
Remove .h5 from imagefolder extensions
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
0
2025-01-16T18:17:24Z
2025-01-16T18:26:40Z
2025-01-16T18:26:38Z
MEMBER
null
null
null
The .h5 format is not relevant for imagefolder, and it makes the viewer fail to process datasets on HF (so many of them that the viewer takes more time to process new datasets)
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7374/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7374/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7374.diff", "html_url": "https://github.com/huggingface/datasets/pull/7374", "merged_at": "2025-01-16T18:26:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/7374.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7374" }
https://api.github.com/repos/huggingface/datasets/issues/7373
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7373/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7373/comments
https://api.github.com/repos/huggingface/datasets/issues/7373/events
https://github.com/huggingface/datasets/issues/7373
2,793,237,139
I_kwDODunzps6mfWqT
7,373
Excessive RAM Usage After Dataset Concatenation concatenate_datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/40773225?v=4", "events_url": "https://api.github.com/users/sam-hey/events{/privacy}", "followers_url": "https://api.github.com/users/sam-hey/followers", "following_url": "https://api.github.com/users/sam-hey/following{/other_user}", "gists_url": "https://api.github.com/users/sam-hey/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sam-hey", "id": 40773225, "login": "sam-hey", "node_id": "MDQ6VXNlcjQwNzczMjI1", "organizations_url": "https://api.github.com/users/sam-hey/orgs", "received_events_url": "https://api.github.com/users/sam-hey/received_events", "repos_url": "https://api.github.com/users/sam-hey/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sam-hey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sam-hey/subscriptions", "type": "User", "url": "https://api.github.com/users/sam-hey", "user_view_type": "public" }
[]
open
false
null
[]
null
3
2025-01-16T16:33:10Z
2025-03-27T17:40:59Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug When loading a dataset from disk, concatenating it, and starting the training process, the RAM usage progressively increases until the kernel terminates the process due to excessive memory consumption. https://github.com/huggingface/datasets/issues/2276 ### Steps to reproduce the bug ```python from datasets import DatasetDict, concatenate_datasets dataset = DatasetDict.load_from_disk("data") ... ... combined_dataset = concatenate_datasets( [dataset[split] for split in dataset] ) #start SentenceTransformer training ``` ### Expected behavior I would not expect RAM utilization to increase after concatenation. Removing the concatenation step resolves the issue ### Environment info sentence-transformers==3.1.1 datasets==3.2.0 python3.10
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7373/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7373/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7372
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7372/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7372/comments
https://api.github.com/repos/huggingface/datasets/issues/7372/events
https://github.com/huggingface/datasets/issues/7372
2,791,760,968
I_kwDODunzps6mZuRI
7,372
Inconsistent Behavior Between `load_dataset` and `load_from_disk` When Loading Sharded Datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/38203359?v=4", "events_url": "https://api.github.com/users/gaohongkui/events{/privacy}", "followers_url": "https://api.github.com/users/gaohongkui/followers", "following_url": "https://api.github.com/users/gaohongkui/following{/other_user}", "gists_url": "https://api.github.com/users/gaohongkui/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gaohongkui", "id": 38203359, "login": "gaohongkui", "node_id": "MDQ6VXNlcjM4MjAzMzU5", "organizations_url": "https://api.github.com/users/gaohongkui/orgs", "received_events_url": "https://api.github.com/users/gaohongkui/received_events", "repos_url": "https://api.github.com/users/gaohongkui/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gaohongkui/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gaohongkui/subscriptions", "type": "User", "url": "https://api.github.com/users/gaohongkui", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-01-16T05:47:20Z
2025-01-16T05:47:20Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Description I encountered an inconsistency in behavior between `load_dataset` and `load_from_disk` when loading sharded datasets. Here is a minimal example to reproduce the issue: #### Code 1: Using `load_dataset` ```python from datasets import Dataset, load_dataset # First save with max_shard_size=10 Dataset.from_dict({"id": range(1000)}).train_test_split(test_size=0.1).save_to_disk("my_sharded_datasetdict", max_shard_size=10) # Second save with max_shard_size=10 Dataset.from_dict({"id": range(500)}).train_test_split(test_size=0.1).save_to_disk("my_sharded_datasetdict", max_shard_size=10) # Load the DatasetDict loaded_datasetdict = load_dataset("my_sharded_datasetdict") print(loaded_datasetdict) ``` **Output**: - `train` has 1350 samples. - `test` has 150 samples. #### Code 2: Using `load_from_disk` ```python from datasets import Dataset, load_from_disk # First save with max_shard_size=10 Dataset.from_dict({"id": range(1000)}).train_test_split(test_size=0.1).save_to_disk("my_sharded_datasetdict", max_shard_size=10) # Second save with max_shard_size=10 Dataset.from_dict({"id": range(500)}).train_test_split(test_size=0.1).save_to_disk("my_sharded_datasetdict", max_shard_size=10) # Load the DatasetDict loaded_datasetdict = load_from_disk("my_sharded_datasetdict") print(loaded_datasetdict) ``` **Output**: - `train` has 450 samples. - `test` has 50 samples. ### Expected Behavior I expected both `load_dataset` and `load_from_disk` to load the same dataset, as they are pointing to the same directory. However, the results differ significantly: - `load_dataset` seems to merge all shards, resulting in a combined dataset. - `load_from_disk` only loads the last saved dataset, ignoring previous shards. ### Questions 1. Is this behavior intentional? If so, could you clarify the difference between `load_dataset` and `load_from_disk` in the documentation? 2. If this is not intentional, could this be considered a bug? 3. What is the recommended way to handle cases where multiple datasets are saved to the same directory? Thank you for your time and effort in maintaining this great library! I look forward to your feedback.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7372/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7372/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7371
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7371/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7371/comments
https://api.github.com/repos/huggingface/datasets/issues/7371/events
https://github.com/huggingface/datasets/issues/7371
2,790,549,889
I_kwDODunzps6mVGmB
7,371
500 Server error with pushing a dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/7677814?v=4", "events_url": "https://api.github.com/users/martinmatak/events{/privacy}", "followers_url": "https://api.github.com/users/martinmatak/followers", "following_url": "https://api.github.com/users/martinmatak/following{/other_user}", "gists_url": "https://api.github.com/users/martinmatak/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/martinmatak", "id": 7677814, "login": "martinmatak", "node_id": "MDQ6VXNlcjc2Nzc4MTQ=", "organizations_url": "https://api.github.com/users/martinmatak/orgs", "received_events_url": "https://api.github.com/users/martinmatak/received_events", "repos_url": "https://api.github.com/users/martinmatak/repos", "site_admin": false, "starred_url": "https://api.github.com/users/martinmatak/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/martinmatak/subscriptions", "type": "User", "url": "https://api.github.com/users/martinmatak", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-01-15T18:23:02Z
2025-01-15T20:06:05Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Suddenly, I started getting this error message saying it was an internal error. `Error creating/pushing dataset: 500 Server Error: Internal Server Error for url: https://huggingface.co/api/datasets/ll4ma-lab/grasp-dataset/commit/main (Request ID: Root=1-6787f0b7-66d5bd45413e481c4c2fb22d;670d04ff-65f5-4741-a353-2eacc47a3928) Internal Error - We're working hard to fix this as soon as possible! Traceback (most recent call last): File "/uufs/chpc.utah.edu/common/home/hermans-group1/martin/software/pkg/miniforge3/envs/myenv2/lib/python3.10/site-packages/huggingface_hub/utils/_http.py", line 406, in hf_raise_for_status response.raise_for_status() File "/uufs/chpc.utah.edu/common/home/hermans-group1/martin/software/pkg/miniforge3/envs/myenv2/lib/python3.10/site-packages/requests/models.py", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: https://huggingface.co/api/datasets/ll4ma-lab/grasp-dataset/commit/main The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/uufs/chpc.utah.edu/common/home/u1295595/grasp_dataset_converter/src/grasp_dataset_converter/main.py", line 142, in main subset_train.push_to_hub(dataset_name, split='train') File "/uufs/chpc.utah.edu/common/home/hermans-group1/martin/software/pkg/miniforge3/envs/myenv2/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 5624, in push_to_hub commit_info = api.create_commit( File "/uufs/chpc.utah.edu/common/home/hermans-group1/martin/software/pkg/miniforge3/envs/myenv2/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn return fn(*args, **kwargs) File "/uufs/chpc.utah.edu/common/home/hermans-group1/martin/software/pkg/miniforge3/envs/myenv2/lib/python3.10/site-packages/huggingface_hub/hf_api.py", line 1518, in _inner return fn(self, *args, **kwargs) File "/uufs/chpc.utah.edu/common/home/hermans-group1/martin/software/pkg/miniforge3/envs/myenv2/lib/python3.10/site-packages/huggingface_hub/hf_api.py", line 4087, in create_commit hf_raise_for_status(commit_resp, endpoint_name="commit") File "/uufs/chpc.utah.edu/common/home/hermans-group1/martin/software/pkg/miniforge3/envs/myenv2/lib/python3.10/site-packages/huggingface_hub/utils/_http.py", line 477, in hf_raise_for_status raise _format(HfHubHTTPError, str(e), response) from e huggingface_hub.errors.HfHubHTTPError: 500 Server Error: Internal Server Error for url: https://huggingface.co/api/datasets/ll4ma-lab/grasp-dataset/commit/main (Request ID: Root=1-6787f0b7-66d5bd45413e481c4c2fb22d;670d04ff-65f5-4741-a353-2eacc47a3928) Internal Error - We're working hard to fix this as soon as possible!` ### Steps to reproduce the bug I am pushing a Dataset in a loop via push_to_hub API ### Expected behavior It worked fine until it stopped working suddenly. Expected behavior: It should start working again ### Environment info - `datasets` version: 3.2.0 - Platform: Linux-4.18.0-477.15.1.el8_8.x86_64-x86_64-with-glibc2.28 - Python version: 3.10.0 - `huggingface_hub` version: 0.27.1 - PyArrow version: 18.1.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.9.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7371/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7371/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7370
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7370/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7370/comments
https://api.github.com/repos/huggingface/datasets/issues/7370/events
https://github.com/huggingface/datasets/pull/7370
2,787,972,786
PR_kwDODunzps6HwAu7
7,370
Support faster processing using pandas or polars functions in `IterableDataset.map()`
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
2
2025-01-14T18:14:13Z
2025-01-31T11:08:15Z
2025-01-30T13:30:57Z
MEMBER
null
null
null
Following the polars integration :) Allow super fast processing using pandas or polars functions in `IterableDataset.map()` by adding support for pandas and polars formatting in `IterableDataset` ```python import polars as pl from datasets import Dataset ds = Dataset.from_dict({"i": range(10)}).to_iterable_dataset() ds = ds.with_format("polars") ds = ds.map(lambda df: df.with_columns(pl.col("i").add(1).alias("i+1")), batched=True) ds = ds.with_format(None) print(next(iter(ds))) # {'i': 0, 'i+1': 1} ``` It leverages Arrow's zero-copy features from/to pandas and polars. Related to https://github.com/huggingface/datasets/issues/3444 #6762
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7370/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7370/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7370.diff", "html_url": "https://github.com/huggingface/datasets/pull/7370", "merged_at": "2025-01-30T13:30:57Z", "patch_url": "https://github.com/huggingface/datasets/pull/7370.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7370" }
https://api.github.com/repos/huggingface/datasets/issues/7369
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7369/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7369/comments
https://api.github.com/repos/huggingface/datasets/issues/7369/events
https://github.com/huggingface/datasets/issues/7369
2,787,193,238
I_kwDODunzps6mITGW
7,369
Importing dataset gives unhelpful error message when filenames in metadata.csv are not found in the directory
{ "avatar_url": "https://avatars.githubusercontent.com/u/38278139?v=4", "events_url": "https://api.github.com/users/svencornetsdegroot/events{/privacy}", "followers_url": "https://api.github.com/users/svencornetsdegroot/followers", "following_url": "https://api.github.com/users/svencornetsdegroot/following{/other_user}", "gists_url": "https://api.github.com/users/svencornetsdegroot/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/svencornetsdegroot", "id": 38278139, "login": "svencornetsdegroot", "node_id": "MDQ6VXNlcjM4Mjc4MTM5", "organizations_url": "https://api.github.com/users/svencornetsdegroot/orgs", "received_events_url": "https://api.github.com/users/svencornetsdegroot/received_events", "repos_url": "https://api.github.com/users/svencornetsdegroot/repos", "site_admin": false, "starred_url": "https://api.github.com/users/svencornetsdegroot/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/svencornetsdegroot/subscriptions", "type": "User", "url": "https://api.github.com/users/svencornetsdegroot", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-01-14T13:53:21Z
2025-01-14T15:05:51Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug While importing an audiofolder dataset, where the names of the audiofiles don't correspond to the filenames in the metadata.csv, we get an unclear error message that is not helpful for the debugging, i.e. ``` ValueError: Instruction "train" corresponds to no data! ``` ### Steps to reproduce the bug Assume an audiofolder with audiofiles, filename1.mp3, filename2.mp3 etc and a file metadata.csv which contains the columns file_name and sentence. The file_names are formatted like filename1.mp3, filename2.mp3 etc. Load the audio ``` from datasets import load_dataset load_dataset("audiofolder", data_dir='/path/to/audiofolder') ``` When the file_names in the csv are not in sync with the filenames in the audiofolder, then we get an Error message: ``` File /opt/conda/lib/python3.12/site-packages/datasets/arrow_reader.py:251, in BaseReader.read(self, name, instructions, split_infos, in_memory) 249 if not files: 250 msg = f'Instruction "{instructions}" corresponds to no data!' --> 251 raise ValueError(msg) 252 return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory) ValueError: Instruction "train" corresponds to no data! ``` load_dataset has a default value for the argument split = 'train'. ### Expected behavior It would be better to get an error report something like: ``` The metadata.csv file has different filenames than the files in the datadirectory. ``` It would have saved me 4 hours of debugging. ### Environment info - `datasets` version: 3.2.0 - Platform: Linux-5.14.0-427.40.1.el9_4.x86_64-x86_64-with-glibc2.39 - Python version: 3.12.8 - `huggingface_hub` version: 0.27.0 - PyArrow version: 18.1.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.9.0
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7369/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7369/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7368
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7368/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7368/comments
https://api.github.com/repos/huggingface/datasets/issues/7368/events
https://github.com/huggingface/datasets/pull/7368
2,784,272,477
PR_kwDODunzps6HjE97
7,368
Add with_split to DatasetDict.map
{ "avatar_url": "https://avatars.githubusercontent.com/u/93233241?v=4", "events_url": "https://api.github.com/users/jp1924/events{/privacy}", "followers_url": "https://api.github.com/users/jp1924/followers", "following_url": "https://api.github.com/users/jp1924/following{/other_user}", "gists_url": "https://api.github.com/users/jp1924/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jp1924", "id": 93233241, "login": "jp1924", "node_id": "U_kgDOBY6gWQ", "organizations_url": "https://api.github.com/users/jp1924/orgs", "received_events_url": "https://api.github.com/users/jp1924/received_events", "repos_url": "https://api.github.com/users/jp1924/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jp1924/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jp1924/subscriptions", "type": "User", "url": "https://api.github.com/users/jp1924", "user_view_type": "public" }
[]
closed
false
null
[]
null
9
2025-01-13T15:09:56Z
2025-03-08T05:45:02Z
2025-03-07T14:09:52Z
CONTRIBUTOR
null
null
null
#7356
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7368/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7368/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7368.diff", "html_url": "https://github.com/huggingface/datasets/pull/7368", "merged_at": "2025-03-07T14:09:52Z", "patch_url": "https://github.com/huggingface/datasets/pull/7368.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7368" }
https://api.github.com/repos/huggingface/datasets/issues/7366
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7366/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7366/comments
https://api.github.com/repos/huggingface/datasets/issues/7366/events
https://github.com/huggingface/datasets/issues/7366
2,781,522,894
I_kwDODunzps6lyqvO
7,366
Dataset.from_dict() can't handle large dict
{ "avatar_url": "https://avatars.githubusercontent.com/u/164967134?v=4", "events_url": "https://api.github.com/users/CSU-OSS/events{/privacy}", "followers_url": "https://api.github.com/users/CSU-OSS/followers", "following_url": "https://api.github.com/users/CSU-OSS/following{/other_user}", "gists_url": "https://api.github.com/users/CSU-OSS/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/CSU-OSS", "id": 164967134, "login": "CSU-OSS", "node_id": "U_kgDOCdUy3g", "organizations_url": "https://api.github.com/users/CSU-OSS/orgs", "received_events_url": "https://api.github.com/users/CSU-OSS/received_events", "repos_url": "https://api.github.com/users/CSU-OSS/repos", "site_admin": false, "starred_url": "https://api.github.com/users/CSU-OSS/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/CSU-OSS/subscriptions", "type": "User", "url": "https://api.github.com/users/CSU-OSS", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-01-11T02:05:21Z
2025-01-11T02:05:21Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I have 26,000,000 3-tuples. When I use Dataset.from_dict() to load them, neither a .py script nor a Jupyter notebook can run successfully. This is my code: ``` # len(example_data) is 26,000,000, 'diff' is a text diff1_list = [example_data[i].texts[0] for i in range(len(example_data))] diff2_list = [example_data[i].texts[1] for i in range(len(example_data))] label_list = [example_data[i].label for i in range(len(example_data))] embedding_dataset = Dataset.from_dict({ "diff1": diff1_list, "diff2": diff2_list, "label": label_list }) ``` ### Steps to reproduce the bug 1. Initialize a large list of 3-tuples, e.g. 26,000,000 entries 2. Use Dataset.from_dict() to load it ### Expected behavior Dataset.from_dict() runs successfully ### Environment info sentence-transformers 3.3.1
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7366/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7366/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7365
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7365/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7365/comments
https://api.github.com/repos/huggingface/datasets/issues/7365/events
https://github.com/huggingface/datasets/issues/7365
2,780,216,199
I_kwDODunzps6ltruH
7,365
A parameter is specified but not used in datasets.arrow_dataset.Dataset.from_pandas()
{ "avatar_url": "https://avatars.githubusercontent.com/u/69003192?v=4", "events_url": "https://api.github.com/users/NourOM02/events{/privacy}", "followers_url": "https://api.github.com/users/NourOM02/followers", "following_url": "https://api.github.com/users/NourOM02/following{/other_user}", "gists_url": "https://api.github.com/users/NourOM02/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/NourOM02", "id": 69003192, "login": "NourOM02", "node_id": "MDQ6VXNlcjY5MDAzMTky", "organizations_url": "https://api.github.com/users/NourOM02/orgs", "received_events_url": "https://api.github.com/users/NourOM02/received_events", "repos_url": "https://api.github.com/users/NourOM02/repos", "site_admin": false, "starred_url": "https://api.github.com/users/NourOM02/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NourOM02/subscriptions", "type": "User", "url": "https://api.github.com/users/NourOM02", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-01-10T13:39:33Z
2025-01-10T13:39:33Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I am interested in creating train, test and eval splits from a pandas Dataframe, therefore I was looking at the possibilities I can follow. I noticed the split parameter and was hopeful to use it in order to generate the 3 at once, however, while trying to understand the code, i noticed that it has no added value (correct me if I am wrong or misunderstood the code). from_pandas function code : ```python if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable.from_pandas( df=df, preserve_index=preserve_index, ) if features is not None: # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema) # needed to support the str to Audio conversion for instance table = table.cast(features.arrow_schema) return cls(table, info=info, split=split) ``` ### Steps to reproduce the bug ```python from datasets import Dataset # Filling the split parameter with whatever causes no harm at all data = Dataset.from_pandas(self.raw_data, split='egiojegoierjgoiejgrefiergiuorenvuirgurthgi') ``` ### Expected behavior Would be great if there is no split parameter (if it isn't working), or to add a concrete example of how it can be used. ### Environment info - `datasets` version: 3.2.0 - Platform: Linux-5.15.0-127-generic-x86_64-with-glibc2.35 - Python version: 3.10.12 - `huggingface_hub` version: 0.27.1 - PyArrow version: 18.1.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.9.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7365/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7365/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7364
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7364/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7364/comments
https://api.github.com/repos/huggingface/datasets/issues/7364/events
https://github.com/huggingface/datasets/issues/7364
2,776,929,268
I_kwDODunzps6lhJP0
7,364
API endpoints for gated dataset access requests
{ "avatar_url": "https://avatars.githubusercontent.com/u/6140840?v=4", "events_url": "https://api.github.com/users/jerome-white/events{/privacy}", "followers_url": "https://api.github.com/users/jerome-white/followers", "following_url": "https://api.github.com/users/jerome-white/following{/other_user}", "gists_url": "https://api.github.com/users/jerome-white/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jerome-white", "id": 6140840, "login": "jerome-white", "node_id": "MDQ6VXNlcjYxNDA4NDA=", "organizations_url": "https://api.github.com/users/jerome-white/orgs", "received_events_url": "https://api.github.com/users/jerome-white/received_events", "repos_url": "https://api.github.com/users/jerome-white/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jerome-white/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jerome-white/subscriptions", "type": "User", "url": "https://api.github.com/users/jerome-white", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
null
3
2025-01-09T06:21:20Z
2025-01-09T11:17:40Z
2025-01-09T11:17:20Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request I would like a programmatic way of requesting access to gated datasets. The current solution to gain access forces me to visit a website and physically click an "agreement" button (as per the [documentation](https://huggingface.co/docs/hub/en/datasets-gated#access-gated-datasets-as-a-user)). An ideal approach would be HF API download methods that negotiate access on my behalf based on information from my CLI login and/or token. I realise that may be naive given the various types of access semantics available to dataset authors (automatic versus manual approval, for example) and the complexities it might add to existing methods, but something along those lines would be nice. Perhaps the `*_access_request` methods available to dataset authors can be a precedent; see [`reject_access_request`](https://huggingface.co/docs/huggingface_hub/main/en/package_reference/hf_api#huggingface_hub.HfApi.reject_access_request) for example. ### Motivation When trying to download files from a gated dataset, I'm met with a `GatedRepoError` and instructed to visit the repository's website to gain access: ``` Cannot access gated repo for url https://huggingface.co/datasets/open-llm-leaderboard/meta-llama__Meta-Llama-3.1-70B-Instruct-details/resolve/main/meta-llama__Meta-Llama-3.1-70B-Instruct/samples_leaderboard_math_precalculus_hard_2024-07-19T18-47-29.522341.jsonl. Access to dataset open-llm-leaderboard/meta-llama__Meta-Llama-3.1-70B-Instruct-details is restricted and you are not in the authorized list. Visit https://huggingface.co/datasets/open-llm-leaderboard/meta-llama__Meta-Llama-3.1-70B-Instruct-details to ask for access. ``` This makes task automation extremely difficult. For example, I'm interested in studying sample-level responses of models on the LLM leaderboard -- how they answered particular questions on a given evaluation framework. As I come across more and more participants that gate their data, it's becoming unwieldy to continue my work (there are over 2,000 participants, so in the worst case that's the number of website visits I'd need to undertake manually). One approach is to use Selenium to react to the `GatedRepoError`, but that seems like overkill, and a potential violation of HF terms of service (?). As mentioned in the previous section, there seems to be an [API for gated dataset owners](https://huggingface.co/docs/hub/en/datasets-gated#via-the-api) to manage access requests, and thus some appetite for allowing automated management of gating. This feature request is to extend that to dataset users. ### Your contribution Whether I can help depends on a few things; one being the complexity of the underlying gated access design. If this feature request is accepted I am open to being involved in discussions and testing, and even development under the right time-outcome tradeoff.
{ "avatar_url": "https://avatars.githubusercontent.com/u/6140840?v=4", "events_url": "https://api.github.com/users/jerome-white/events{/privacy}", "followers_url": "https://api.github.com/users/jerome-white/followers", "following_url": "https://api.github.com/users/jerome-white/following{/other_user}", "gists_url": "https://api.github.com/users/jerome-white/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jerome-white", "id": 6140840, "login": "jerome-white", "node_id": "MDQ6VXNlcjYxNDA4NDA=", "organizations_url": "https://api.github.com/users/jerome-white/orgs", "received_events_url": "https://api.github.com/users/jerome-white/received_events", "repos_url": "https://api.github.com/users/jerome-white/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jerome-white/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jerome-white/subscriptions", "type": "User", "url": "https://api.github.com/users/jerome-white", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7364/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7364/timeline
null
not_planned
null
null
https://api.github.com/repos/huggingface/datasets/issues/7363
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7363/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7363/comments
https://api.github.com/repos/huggingface/datasets/issues/7363/events
https://github.com/huggingface/datasets/issues/7363
2,774,090,012
I_kwDODunzps6lWUEc
7,363
ImportError: To support decoding images, please install 'Pillow'.
{ "avatar_url": "https://avatars.githubusercontent.com/u/1394644?v=4", "events_url": "https://api.github.com/users/jamessdixon/events{/privacy}", "followers_url": "https://api.github.com/users/jamessdixon/followers", "following_url": "https://api.github.com/users/jamessdixon/following{/other_user}", "gists_url": "https://api.github.com/users/jamessdixon/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jamessdixon", "id": 1394644, "login": "jamessdixon", "node_id": "MDQ6VXNlcjEzOTQ2NDQ=", "organizations_url": "https://api.github.com/users/jamessdixon/orgs", "received_events_url": "https://api.github.com/users/jamessdixon/received_events", "repos_url": "https://api.github.com/users/jamessdixon/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jamessdixon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jamessdixon/subscriptions", "type": "User", "url": "https://api.github.com/users/jamessdixon", "user_view_type": "public" }
[]
open
false
null
[]
null
4
2025-01-08T02:22:57Z
2025-05-28T14:56:53Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Following this tutorial locally using a MacBook and VSCode: https://huggingface.co/docs/diffusers/en/tutorials/basic_training This line of code: for i, image in enumerate(dataset[:4]["image"]): throws: ImportError: To support decoding images, please install 'Pillow'. Pillow is installed. ### Steps to reproduce the bug Run the tutorial ### Expected behavior Images should be rendered ### Environment info MacBook, VSCode
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7363/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7363/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7362
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7362/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7362/comments
https://api.github.com/repos/huggingface/datasets/issues/7362/events
https://github.com/huggingface/datasets/issues/7362
2,773,731,829
I_kwDODunzps6lU8n1
7,362
HuggingFace CLI dataset download raises error
{ "avatar_url": "https://avatars.githubusercontent.com/u/3870355?v=4", "events_url": "https://api.github.com/users/ajayvohra2005/events{/privacy}", "followers_url": "https://api.github.com/users/ajayvohra2005/followers", "following_url": "https://api.github.com/users/ajayvohra2005/following{/other_user}", "gists_url": "https://api.github.com/users/ajayvohra2005/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ajayvohra2005", "id": 3870355, "login": "ajayvohra2005", "node_id": "MDQ6VXNlcjM4NzAzNTU=", "organizations_url": "https://api.github.com/users/ajayvohra2005/orgs", "received_events_url": "https://api.github.com/users/ajayvohra2005/received_events", "repos_url": "https://api.github.com/users/ajayvohra2005/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ajayvohra2005/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ajayvohra2005/subscriptions", "type": "User", "url": "https://api.github.com/users/ajayvohra2005", "user_view_type": "public" }
[]
closed
false
null
[]
null
3
2025-01-07T21:03:30Z
2025-01-08T15:00:37Z
2025-01-08T14:35:52Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Trying to download Hugging Face datasets using Hugging Face CLI raises error. This error only started after December 27th, 2024. For example: ``` huggingface-cli download --repo-type dataset gboleda/wikicorpus Traceback (most recent call last): File "/home/ubuntu/test_venv/bin/huggingface-cli", line 8, in <module> sys.exit(main()) File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/commands/huggingface_cli.py", line 51, in main service.run() File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/commands/download.py", line 146, in run print(self._download()) # Print path to downloaded files File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/commands/download.py", line 180, in _download return snapshot_download( File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn return fn(*args, **kwargs) File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/_snapshot_download.py", line 164, in snapshot_download repo_info = api.repo_info(repo_id=repo_id, repo_type=repo_type, revision=revision, token=token) File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn return fn(*args, **kwargs) File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/hf_api.py", line 2491, in repo_info return method( File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn return fn(*args, **kwargs) File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/hf_api.py", line 2366, in dataset_info return DatasetInfo(**data) File "/home/ubuntu/test_venv/lib/python3.10/site-packages/huggingface_hub/hf_api.py", line 799, in __init__ self.tags = kwargs.pop("tags") KeyError: 'tags' ``` ### Steps to reproduce the bug ``` 1. huggingface-cli download --repo-type dataset gboleda/wikicorpus ``` ### Expected behavior There should be no error. ### Environment info - `datasets` version: 2.19.1 - Platform: Linux-6.8.0-1015-aws-x86_64-with-glibc2.35 - Python version: 3.10.12 - `huggingface_hub` version: 0.23.5 - PyArrow version: 18.1.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.3.1
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 3, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/7362/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7362/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7361
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7361/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7361/comments
https://api.github.com/repos/huggingface/datasets/issues/7361/events
https://github.com/huggingface/datasets/pull/7361
2,771,859,244
PR_kwDODunzps6G4t2p
7,361
Fix lock permission
{ "avatar_url": "https://avatars.githubusercontent.com/u/11530592?v=4", "events_url": "https://api.github.com/users/cih9088/events{/privacy}", "followers_url": "https://api.github.com/users/cih9088/followers", "following_url": "https://api.github.com/users/cih9088/following{/other_user}", "gists_url": "https://api.github.com/users/cih9088/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cih9088", "id": 11530592, "login": "cih9088", "node_id": "MDQ6VXNlcjExNTMwNTky", "organizations_url": "https://api.github.com/users/cih9088/orgs", "received_events_url": "https://api.github.com/users/cih9088/received_events", "repos_url": "https://api.github.com/users/cih9088/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cih9088/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cih9088/subscriptions", "type": "User", "url": "https://api.github.com/users/cih9088", "user_view_type": "public" }
[]
open
false
null
[]
null
0
2025-01-07T04:15:53Z
2025-01-07T04:49:46Z
null
NONE
null
null
null
All files except the lock file have proper permissions, obeying the `ACL` property if it is set. If the cache directory has an `ACL` property, it should be respected instead of just using `umask` for permissions. To fix it, just create the lock file first and pass the created `mode`. By creating the lock file with `touch()` before `FileLock` creates it with `mode`: - if `ACL` is not set, the behavior is the same as before - if `ACL` is set, the `ACL` is respected If this is acceptable, it should also be applied to [`huggingface_hub`](https://github.com/huggingface/huggingface_hub/blob/2702ec2a2bd0124cc1fddfd72ccb1297b2478148/src/huggingface_hub/utils/_fixes.py#L95), I guess.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7361/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7361/timeline
null
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/7361.diff", "html_url": "https://github.com/huggingface/datasets/pull/7361", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7361.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7361" }
https://api.github.com/repos/huggingface/datasets/issues/7360
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7360/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7360/comments
https://api.github.com/repos/huggingface/datasets/issues/7360/events
https://github.com/huggingface/datasets/issues/7360
2,771,751,406
I_kwDODunzps6lNZHu
7,360
error when loading dataset in Hugging Face: NoneType error is not callable
{ "avatar_url": "https://avatars.githubusercontent.com/u/189343338?v=4", "events_url": "https://api.github.com/users/nanu23333/events{/privacy}", "followers_url": "https://api.github.com/users/nanu23333/followers", "following_url": "https://api.github.com/users/nanu23333/following{/other_user}", "gists_url": "https://api.github.com/users/nanu23333/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nanu23333", "id": 189343338, "login": "nanu23333", "node_id": "U_kgDOC0kmag", "organizations_url": "https://api.github.com/users/nanu23333/orgs", "received_events_url": "https://api.github.com/users/nanu23333/received_events", "repos_url": "https://api.github.com/users/nanu23333/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nanu23333/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nanu23333/subscriptions", "type": "User", "url": "https://api.github.com/users/nanu23333", "user_view_type": "public" }
[]
open
false
null
[]
null
5
2025-01-07T02:11:36Z
2025-02-24T13:32:52Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I met an error when running a notebook provide by Hugging Face, and met the error. ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Cell In[2], line 5 3 # Load the enhancers dataset from the InstaDeep Hugging Face ressources 4 dataset_name = "enhancers_types" ----> 5 train_dataset_enhancers = load_dataset( 6 "InstaDeepAI/nucleotide_transformer_downstream_tasks_revised", 7 dataset_name, 8 split="train", 9 streaming= False, 10 ) 11 test_dataset_enhancers = load_dataset( 12 "InstaDeepAI/nucleotide_transformer_downstream_tasks_revised", 13 dataset_name, 14 split="test", 15 streaming= False, 16 ) File /public/home/hhl/miniconda3/envs/transformer/lib/python3.9/site-packages/datasets/load.py:2129, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, keep_in_memory, save_infos, revision, token, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs) 2124 verification_mode = VerificationMode( 2125 (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS 2126 ) 2128 # Create a dataset builder -> 2129 builder_instance = load_dataset_builder( 2130 path=path, 2131 name=name, 2132 data_dir=data_dir, 2133 data_files=data_files, 2134 cache_dir=cache_dir, 2135 features=features, 2136 download_config=download_config, 2137 download_mode=download_mode, 2138 revision=revision, 2139 token=token, 2140 storage_options=storage_options, 2141 trust_remote_code=trust_remote_code, 2142 _require_default_config_name=name is None, 2143 **config_kwargs, 2144 ) 2146 # Return iterable dataset in case of streaming 2147 if streaming: File /public/home/hhl/miniconda3/envs/transformer/lib/python3.9/site-packages/datasets/load.py:1886, in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, token, storage_options, trust_remote_code, _require_default_config_name, **config_kwargs) 1884 builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name) 1885 # Instantiate the dataset builder -> 1886 builder_instance: DatasetBuilder = builder_cls( 1887 cache_dir=cache_dir, 1888 dataset_name=dataset_name, 1889 config_name=config_name, 1890 data_dir=data_dir, 1891 data_files=data_files, 1892 hash=dataset_module.hash, 1893 info=info, 1894 features=features, 1895 token=token, 1896 storage_options=storage_options, 1897 **builder_kwargs, 1898 **config_kwargs, 1899 ) 1900 builder_instance._use_legacy_cache_dir_if_possible(dataset_module) 1902 return builder_instance TypeError: 'NoneType' object is not callable ``` I have checked my internet, it worked well. And the dataset name was just copied from the Hugging Face. Totally no idea what is wrong! ### Steps to reproduce the bug To reproduce the bug you may run ``` from datasets import load_dataset, Dataset # Load the enhancers dataset from the InstaDeep Hugging Face ressources dataset_name = "enhancers_types" train_dataset_enhancers = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks_revised", dataset_name, split="train", streaming= False, ) test_dataset_enhancers = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks_revised", dataset_name, split="test", streaming= False, ) ``` ### Expected behavior 1. what may be the reasons of the error 2. how can I fine which reason lead to the error 3. 
how can I save the problem ### Environment info ``` - `datasets` version: 3.2.0 - Platform: Linux-5.15.0-117-generic-x86_64-with-glibc2.31 - Python version: 3.9.21 - `huggingface_hub` version: 0.27.0 - PyArrow version: 18.1.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.9.0 ```
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7360/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7360/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7359
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7359/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7359/comments
https://api.github.com/repos/huggingface/datasets/issues/7359/events
https://github.com/huggingface/datasets/issues/7359
2,771,137,842
I_kwDODunzps6lLDUy
7,359
There are multiple 'mteb/arguana' configurations in the cache: default, corpus, queries with HF_HUB_OFFLINE=1
{ "avatar_url": "https://avatars.githubusercontent.com/u/723146?v=4", "events_url": "https://api.github.com/users/Bhavya6187/events{/privacy}", "followers_url": "https://api.github.com/users/Bhavya6187/followers", "following_url": "https://api.github.com/users/Bhavya6187/following{/other_user}", "gists_url": "https://api.github.com/users/Bhavya6187/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Bhavya6187", "id": 723146, "login": "Bhavya6187", "node_id": "MDQ6VXNlcjcyMzE0Ng==", "organizations_url": "https://api.github.com/users/Bhavya6187/orgs", "received_events_url": "https://api.github.com/users/Bhavya6187/received_events", "repos_url": "https://api.github.com/users/Bhavya6187/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Bhavya6187/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bhavya6187/subscriptions", "type": "User", "url": "https://api.github.com/users/Bhavya6187", "user_view_type": "public" }
[]
open
false
null
[]
null
1
2025-01-06T17:42:49Z
2025-01-06T17:43:31Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Hey folks, I am trying to run this code - ```python from datasets import load_dataset, get_dataset_config_names ds = load_dataset("mteb/arguana") ``` with HF_HUB_OFFLINE=1 But I get the following error - ```python Using the latest cached version of the dataset since mteb/arguana couldn't be found on the Hugging Face Hub (offline mode is enabled). --------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In[2], line 1 ----> 1 ds = load_dataset("mteb/arguana") File ~/env/lib/python3.10/site-packages/datasets/load.py:2129, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, keep_in_memory, save_infos, revision, token, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs) 2124 verification_mode = VerificationMode( 2125 (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS 2126 ) 2128 # Create a dataset builder -> 2129 builder_instance = load_dataset_builder( 2130 path=path, 2131 name=name, 2132 data_dir=data_dir, 2133 data_files=data_files, 2134 cache_dir=cache_dir, 2135 features=features, 2136 download_config=download_config, 2137 download_mode=download_mode, 2138 revision=revision, 2139 token=token, 2140 storage_options=storage_options, 2141 trust_remote_code=trust_remote_code, 2142 _require_default_config_name=name is None, 2143 **config_kwargs, 2144 ) 2146 # Return iterable dataset in case of streaming 2147 if streaming: File ~/env/lib/python3.10/site-packages/datasets/load.py:1886, in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, token, storage_options, trust_remote_code, _require_default_config_name, **config_kwargs) 1884 builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name) 1885 # Instantiate the dataset builder -> 1886 builder_instance: DatasetBuilder = builder_cls( 1887 cache_dir=cache_dir, 1888 dataset_name=dataset_name, 1889 config_name=config_name, 1890 data_dir=data_dir, 1891 data_files=data_files, 1892 hash=dataset_module.hash, 1893 info=info, 1894 features=features, 1895 token=token, 1896 storage_options=storage_options, 1897 **builder_kwargs, 1898 **config_kwargs, 1899 ) 1900 builder_instance._use_legacy_cache_dir_if_possible(dataset_module) 1902 return builder_instance File ~/env/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py:124, in Cache.__init__(self, cache_dir, dataset_name, config_name, version, hash, base_path, info, features, token, repo_id, data_files, data_dir, storage_options, writer_batch_size, **config_kwargs) 122 config_kwargs["data_dir"] = data_dir 123 if hash == "auto" and version == "auto": --> 124 config_name, version, hash = _find_hash_in_cache( 125 dataset_name=repo_id or dataset_name, 126 config_name=config_name, 127 cache_dir=cache_dir, 128 config_kwargs=config_kwargs, 129 custom_features=features, 130 ) 131 elif hash == "auto" or version == "auto": 132 raise NotImplementedError("Pass both hash='auto' and version='auto' instead") File ~/env/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py:84, in _find_hash_in_cache(dataset_name, config_name, cache_dir, config_kwargs, custom_features) 72 other_configs = [ 73 Path(_cached_directory_path).parts[-3] 74 for _cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash)) (...) 
81 ) 82 ] 83 if not config_id and len(other_configs) > 1: ---> 84 raise ValueError( 85 f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}" 86 f"\nPlease specify which configuration to reload from the cache, e.g." 87 f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')" 88 ) 89 config_name = cached_directory_path.parts[-3] 90 warning_msg = ( 91 f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} " 92 f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})." 93 ) ValueError: There are multiple 'mteb/arguana' configurations in the cache: queries, corpus, default Please specify which configuration to reload from the cache, e.g. load_dataset('mteb/arguana', 'queries') ``` It works when I run the same code with HF_HUB_OFFLINE=0, but after the data is downloaded, I turn off the HF hub cache with HF_HUB_OFFLINE=1, and then this error appears. Are there some files I am missing with hub disabled? ### Steps to reproduce the bug from datasets import load_dataset, get_dataset_config_names ds = load_dataset("mteb/arguana") with HF_HUB_OFFLINE=1 (after already running it with HF_HUB_OFFLINE=0 and populating the datasets cache) ### Expected behavior Dataset loaded successfully as it does with HF_HUB_OFFLINE=1 ### Environment info - `datasets` version: 3.2.0 - Platform: Linux-5.15.148.2-2.cm2-x86_64-with-glibc2.35 - Python version: 3.10.14 - `huggingface_hub` version: 0.27.0 - PyArrow version: 17.0.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.6.1
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7359/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7359/timeline
null
null
null
null