url | repository_url | labels_url | comments_url | events_url | html_url | id | node_id | number | title | user | labels | state | locked | assignee | assignees | milestone | comments | created_at | updated_at | closed_at | author_association | type | active_lock_reason | sub_issues_summary | body | closed_by | reactions | timeline_url | performed_via_github_app | state_reason | draft | pull_request |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
https://api.github.com/repos/huggingface/datasets/issues/6835 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6835/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6835/comments | https://api.github.com/repos/huggingface/datasets/issues/6835/events | https://github.com/huggingface/datasets/pull/6835 | 2,261,079,263 | PR_kwDODunzps5tl2fc | 6,835 | Support pyarrow LargeListType | {
"avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4",
"events_url": "https://api.github.com/users/Modexus/events{/privacy}",
"followers_url": "https://api.github.com/users/Modexus/followers",
"following_url": "https://api.github.com/users/Modexus/following{/other_user}",
"gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Modexus",
"id": 37351874,
"login": "Modexus",
"node_id": "MDQ6VXNlcjM3MzUxODc0",
"organizations_url": "https://api.github.com/users/Modexus/orgs",
"received_events_url": "https://api.github.com/users/Modexus/received_events",
"repos_url": "https://api.github.com/users/Modexus/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Modexus/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Modexus",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-24T11:34:24Z | 2024-08-12T14:43:47Z | 2024-08-12T14:43:47Z | CONTRIBUTOR | null | null | null | Fixes #6834 | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6835/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6835/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6835.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6835",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6835.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6835"
} |
https://api.github.com/repos/huggingface/datasets/issues/6834 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6834/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6834/comments | https://api.github.com/repos/huggingface/datasets/issues/6834/events | https://github.com/huggingface/datasets/issues/6834 | 2,261,078,104 | I_kwDODunzps6GxVBY | 6,834 | largelisttype not supported (.from_polars()) | {
"avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4",
"events_url": "https://api.github.com/users/Modexus/events{/privacy}",
"followers_url": "https://api.github.com/users/Modexus/followers",
"following_url": "https://api.github.com/users/Modexus/following{/other_user}",
"gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Modexus",
"id": 37351874,
"login": "Modexus",
"node_id": "MDQ6VXNlcjM3MzUxODc0",
"organizations_url": "https://api.github.com/users/Modexus/orgs",
"received_events_url": "https://api.github.com/users/Modexus/received_events",
"repos_url": "https://api.github.com/users/Modexus/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Modexus/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Modexus",
"user_view_type": "public"
} | [] | closed | false | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [
{
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
}
] | null | 0 | 2024-04-24T11:33:43Z | 2024-08-12T14:43:46Z | 2024-08-12T14:43:46Z | CONTRIBUTOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
The following code fails because LargeListType is not supported.
This is especially a problem for .from_polars since polars uses LargeListType.
### Steps to reproduce the bug
```python
import datasets
import polars as pl
df = pl.DataFrame({"list": [[]]})
datasets.Dataset.from_polars(df)
```
### Expected behavior
Convert LargeListType to list.
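A possible user-side workaround until this lands, sketched with plain pyarrow and the public `Dataset.from_pandas` (the cast and the sample data below are illustrative, not the actual fix from #6835):
```python
import pyarrow as pa
import polars as pl
from datasets import Dataset

df = pl.DataFrame({"list": [[1, 2], [3]]})   # illustrative data
table = df.to_arrow()                        # polars emits large_list columns

# Downcast every large_list field to a plain list field.
schema = pa.schema(
    [
        pa.field(field.name, pa.list_(field.type.value_type))
        if pa.types.is_large_list(field.type)
        else field
        for field in table.schema
    ]
)
ds = Dataset.from_pandas(table.cast(schema).to_pandas())
print(ds.features)
```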
### Environment info
- `datasets` version: 2.19.1.dev0
- Platform: Linux-6.8.7-200.fc39.x86_64-x86_64-with-glibc2.38
- Python version: 3.12.2
- `huggingface_hub` version: 0.22.2
- PyArrow version: 16.0.0
- Pandas version: 2.1.4
- `fsspec` version: 2024.3.1 | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6834/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6834/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6833 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6833/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6833/comments | https://api.github.com/repos/huggingface/datasets/issues/6833/events | https://github.com/huggingface/datasets/issues/6833 | 2,259,731,274 | I_kwDODunzps6GsMNK | 6,833 | Super slow iteration with trivial custom transform | {
"avatar_url": "https://avatars.githubusercontent.com/u/2780075?v=4",
"events_url": "https://api.github.com/users/xslittlegrass/events{/privacy}",
"followers_url": "https://api.github.com/users/xslittlegrass/followers",
"following_url": "https://api.github.com/users/xslittlegrass/following{/other_user}",
"gists_url": "https://api.github.com/users/xslittlegrass/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/xslittlegrass",
"id": 2780075,
"login": "xslittlegrass",
"node_id": "MDQ6VXNlcjI3ODAwNzU=",
"organizations_url": "https://api.github.com/users/xslittlegrass/orgs",
"received_events_url": "https://api.github.com/users/xslittlegrass/received_events",
"repos_url": "https://api.github.com/users/xslittlegrass/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/xslittlegrass/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/xslittlegrass/subscriptions",
"type": "User",
"url": "https://api.github.com/users/xslittlegrass",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 7 | 2024-04-23T20:40:59Z | 2024-10-08T15:41:18Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Dataset is 10X slower when applying trivial transforms:
```
import time
import numpy as np
from datasets import Dataset, Features, Array2D
a = np.zeros((800, 800))
a = np.stack([a] * 1000)
features = Features({"a": Array2D(shape=(800, 800), dtype="uint8")})
ds1 = Dataset.from_dict({"a": a}, features=features).with_format('numpy')
def transform(batch):
    return batch
ds2 = ds1.with_transform(transform)
%time sum(1 for _ in ds1)
%time sum(1 for _ in ds2)
```
```
CPU times: user 472 ms, sys: 319 ms, total: 791 ms
Wall time: 794 ms
CPU times: user 9.32 s, sys: 443 ms, total: 9.76 s
Wall time: 9.78 s
```
In my real code I'm using set_transform to apply some post-processing on-the-fly for the 2d array, but it significantly slows down the dataset even if the transform itself is trivial.
Related issue: https://github.com/huggingface/datasets/issues/5841
### Steps to reproduce the bug
Use code in the description to reproduce.
### Expected behavior
The trivial custom transform in the example should not slow down dataset iteration.
### Environment info
- `datasets` version: 2.18.0
- Platform: Linux-5.15.0-79-generic-x86_64-with-glibc2.35
- Python version: 3.11.4
- `huggingface_hub` version: 0.20.2
- PyArrow version: 15.0.0
- Pandas version: 1.5.3
- `fsspec` version: 2023.12.2 | null | {
"+1": 3,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 3,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6833/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6833/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6832 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6832/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6832/comments | https://api.github.com/repos/huggingface/datasets/issues/6832/events | https://github.com/huggingface/datasets/pull/6832 | 2,258,761,447 | PR_kwDODunzps5teFoJ | 6,832 | Support downloading specific splits in `load_dataset` | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 4 | 2024-04-23T12:32:27Z | 2024-08-19T15:19:38Z | null | COLLABORATOR | null | null | null | This PR builds on https://github.com/huggingface/datasets/pull/6639 to support downloading only the specified splits in `load_dataset`. For this to work, a builder's `_split_generators` need to be able to accept the requested splits (as a list) via a `splits` argument to avoid processing the non-requested ones. Also, the builder has to define a `_available_splits` method that lists all the possible `splits` values.
Close https://github.com/huggingface/datasets/issues/4101, close https://github.com/huggingface/datasets/issues/2538 (I'm probably missing some)
Should also make it possible to address https://github.com/huggingface/datasets/issues/6793 | null | {
"+1": 2,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 2,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6832/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6832/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6832.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6832",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6832.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6832"
} |
https://api.github.com/repos/huggingface/datasets/issues/6831 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6831/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6831/comments | https://api.github.com/repos/huggingface/datasets/issues/6831/events | https://github.com/huggingface/datasets/pull/6831 | 2,258,537,405 | PR_kwDODunzps5tdTy_ | 6,831 | Add docs about the CLI | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-23T10:41:03Z | 2024-04-26T16:51:09Z | 2024-04-25T10:44:10Z | MEMBER | null | null | null | Add docs about the CLI.
Close #6830.
CC: @severo | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6831/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6831/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6831.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6831",
"merged_at": "2024-04-25T10:44:10Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6831.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6831"
} |
https://api.github.com/repos/huggingface/datasets/issues/6830 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6830/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6830/comments | https://api.github.com/repos/huggingface/datasets/issues/6830/events | https://github.com/huggingface/datasets/issues/6830 | 2,258,433,178 | I_kwDODunzps6GnPSa | 6,830 | Add a doc page for the convert_to_parquet CLI | {
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/severo",
"id": 1676121,
"login": "severo",
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"organizations_url": "https://api.github.com/users/severo/orgs",
"received_events_url": "https://api.github.com/users/severo/received_events",
"repos_url": "https://api.github.com/users/severo/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"type": "User",
"url": "https://api.github.com/users/severo",
"user_view_type": "public"
} | [
{
"color": "0075ca",
"default": true,
"description": "Improvements or additions to documentation",
"id": 1935892861,
"name": "documentation",
"node_id": "MDU6TGFiZWwxOTM1ODkyODYx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/documentation"
}
] | closed | false | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [
{
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
}
] | null | 0 | 2024-04-23T09:49:04Z | 2024-04-25T10:44:11Z | 2024-04-25T10:44:11Z | COLLABORATOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | Follow-up to https://github.com/huggingface/datasets/pull/6795. Useful for https://github.com/huggingface/dataset-viewer/issues/2742. cc @albertvillanova | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 1,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6830/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6830/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6829 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6829/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6829/comments | https://api.github.com/repos/huggingface/datasets/issues/6829/events | https://github.com/huggingface/datasets/issues/6829 | 2,258,424,577 | I_kwDODunzps6GnNMB | 6,829 | Load and save from/to disk no longer accept pathlib.Path | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [
{
"color": "d73a4a",
"default": true,
"description": "Something isn't working",
"id": 1935892857,
"name": "bug",
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug"
}
] | open | false | null | [] | null | 0 | 2024-04-23T09:44:45Z | 2024-04-23T09:44:46Z | null | MEMBER | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | Reported by @vttrifonov at https://github.com/huggingface/datasets/pull/6704#issuecomment-2071168296:
> This change is breaking in
> https://github.com/huggingface/datasets/blob/f96e74d5c633cd5435dd526adb4a74631eb05c43/src/datasets/arrow_dataset.py#L1515
> when the input is `pathlib.Path`. The issue is that `url_to_fs` expects a `str` and cannot deal with `Path`. `get_fs_token_paths` converts to `str` so it is not a problem
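Until this is fixed, a caller-side workaround sketch (public API only, with an illustrative path) is to coerce the `Path` to `str` before calling:
```python
from pathlib import Path
from datasets import Dataset, load_from_disk

out_dir = Path("tmp") / "my_dataset"   # illustrative location
ds = Dataset.from_dict({"x": [1, 2, 3]})

ds.save_to_disk(str(out_dir))          # str(...) sidesteps url_to_fs(Path)
reloaded = load_from_disk(str(out_dir))
print(reloaded)
```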
This change was introduced in:
- #6704 | null | {
"+1": 1,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6829/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6829/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6828 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6828/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6828/comments | https://api.github.com/repos/huggingface/datasets/issues/6828/events | https://github.com/huggingface/datasets/pull/6828 | 2,258,420,421 | PR_kwDODunzps5tc55y | 6,828 | Support PathLike input in save_to_disk / load_from_disk | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 1 | 2024-04-23T09:42:38Z | 2024-04-23T11:05:52Z | null | MEMBER | null | null | null | null | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6828/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6828/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6828.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6828",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6828.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6828"
} |
https://api.github.com/repos/huggingface/datasets/issues/6827 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6827/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6827/comments | https://api.github.com/repos/huggingface/datasets/issues/6827/events | https://github.com/huggingface/datasets/issues/6827 | 2,254,011,833 | I_kwDODunzps6GWX25 | 6,827 | Loading a remote dataset fails in the last release (v2.19.0) | {
"avatar_url": "https://avatars.githubusercontent.com/u/35369637?v=4",
"events_url": "https://api.github.com/users/zrthxn/events{/privacy}",
"followers_url": "https://api.github.com/users/zrthxn/followers",
"following_url": "https://api.github.com/users/zrthxn/following{/other_user}",
"gists_url": "https://api.github.com/users/zrthxn/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/zrthxn",
"id": 35369637,
"login": "zrthxn",
"node_id": "MDQ6VXNlcjM1MzY5NjM3",
"organizations_url": "https://api.github.com/users/zrthxn/orgs",
"received_events_url": "https://api.github.com/users/zrthxn/received_events",
"repos_url": "https://api.github.com/users/zrthxn/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/zrthxn/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/zrthxn/subscriptions",
"type": "User",
"url": "https://api.github.com/users/zrthxn",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 0 | 2024-04-19T21:11:58Z | 2024-04-19T21:13:42Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | While loading a dataset with multiple splits I get an error saying `Couldn't find file at <URL>`
I am loading the dataset like so, nothing out of the ordinary.
This dataset needs a token to access it.
```
token="hf_myhftoken-sdhbdsjgkhbd"
load_dataset("speechcolab/gigaspeech", "test", cache_dir=f"gigaspeech/test", token=token)
```
I get the following error

The URL it is trying to reach has the JSON object of the dataset split appended to the base URL. I think this may be due to a newly introduced issue.
I did not have this issue with the previous version of datasets. Everything was fine for me yesterday, and after the release 12 hours ago this seems to have broken. Also, the dataset in question runs custom code, and I checked that there have been no commits to the dataset on Huggingface in 6 months.
### Steps to reproduce the bug
Since this happened with one particular dataset for me, I am listing steps to use that dataset.
1. Open https://huggingface.co/datasets/speechcolab/gigaspeech and fill the form to get access.
2. Create a token on your huggingface account with read access.
3. Run the following line, substituting `<your_token_here>` with your token.
```
load_dataset("speechcolab/gigaspeech", "test", cache_dir=f"gigaspeech/test", token="<your_token_here>")
```
### Expected behavior
Be able to load the dataset in question.
### Environment info
datasets == 2.19.0
python == 3.10
kernel == Linux 6.1.58+ | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6827/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6827/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6826 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6826/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6826/comments | https://api.github.com/repos/huggingface/datasets/issues/6826/events | https://github.com/huggingface/datasets/pull/6826 | 2,252,445,242 | PR_kwDODunzps5tJMZh | 6,826 | Set dev version | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-19T08:51:42Z | 2024-04-19T09:05:25Z | 2024-04-19T08:52:14Z | MEMBER | null | null | null | null | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6826/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6826/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6826.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6826",
"merged_at": "2024-04-19T08:52:13Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6826.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6826"
} |
https://api.github.com/repos/huggingface/datasets/issues/6825 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6825/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6825/comments | https://api.github.com/repos/huggingface/datasets/issues/6825/events | https://github.com/huggingface/datasets/pull/6825 | 2,252,404,599 | PR_kwDODunzps5tJEMw | 6,825 | Release: 2.19.0 | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-19T08:29:02Z | 2024-05-04T12:23:26Z | 2024-04-19T08:44:57Z | MEMBER | null | null | null | null | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 1,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6825/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6825/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6825.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6825",
"merged_at": "2024-04-19T08:44:57Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6825.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6825"
} |
https://api.github.com/repos/huggingface/datasets/issues/6824 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6824/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6824/comments | https://api.github.com/repos/huggingface/datasets/issues/6824/events | https://github.com/huggingface/datasets/issues/6824 | 2,251,076,197 | I_kwDODunzps6GLLJl | 6,824 | Winogrande does not seem to be compatible with datasets version of 1.18.0 | {
"avatar_url": "https://avatars.githubusercontent.com/u/7878204?v=4",
"events_url": "https://api.github.com/users/spliew/events{/privacy}",
"followers_url": "https://api.github.com/users/spliew/followers",
"following_url": "https://api.github.com/users/spliew/following{/other_user}",
"gists_url": "https://api.github.com/users/spliew/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/spliew",
"id": 7878204,
"login": "spliew",
"node_id": "MDQ6VXNlcjc4NzgyMDQ=",
"organizations_url": "https://api.github.com/users/spliew/orgs",
"received_events_url": "https://api.github.com/users/spliew/received_events",
"repos_url": "https://api.github.com/users/spliew/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/spliew/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/spliew/subscriptions",
"type": "User",
"url": "https://api.github.com/users/spliew",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-18T16:11:04Z | 2024-04-19T09:53:15Z | 2024-04-19T09:52:33Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
I get the following error when simply running `load_dataset('winogrande','winogrande_xl')`.
I do not have such an issue in the 1.17.0 version.
```Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.10/dist-packages/datasets/load.py", line 2556, in load_dataset
builder_instance = load_dataset_builder(
File "/usr/local/lib/python3.10/dist-packages/datasets/load.py", line 2265, in load_dataset_builder
builder_instance: DatasetBuilder = builder_cls(
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 371, in __init__
self.config, self.config_id = self._create_builder_config(
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 620, in _create_builder_config
builder_config._resolve_data_files(
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 211, in _resolve_data_files
self.data_files = self.data_files.resolve(base_path, download_config)
File "/usr/local/lib/python3.10/dist-packages/datasets/data_files.py", line 799, in resolve
out[key] = data_files_patterns_list.resolve(base_path, download_config)
File "/usr/local/lib/python3.10/dist-packages/datasets/data_files.py", line 752, in resolve
resolve_pattern(
File "/usr/local/lib/python3.10/dist-packages/datasets/data_files.py", line 393, in resolve_pattern
raise FileNotFoundError(error_msg)
FileNotFoundError: Unable to find 'hf://datasets/winogrande@ebf71e3c7b5880d019ecf6099c0b09311b1084f5/winogrande_xl/train/0000.parquet' with any supported extension ['.csv', '.tsv', '.json', '.jsonl', '.parquet', '.geoparquet', '.gpq', '.arrow', '.txt', '.tar', '.blp', '.bmp', '.dib', '.bufr', '.cur', '.pcx', '.dcx', '.dds', '.ps', '.eps', '.fit', '.fits', '.fli', '.flc', '.ftc', '.ftu', '.gbr', '.gif', '.grib', '.h5', '.hdf', '.png', '.apng', '.jp2', '.j2k', '.jpc', '.jpf', '.jpx', '.j2c', '.icns', '.ico', '.im', '.iim', '.tif', '.tiff', '.jfif', '.jpe', '.jpg', '.jpeg', '.mpg', '.mpeg', '.msp', '.pcd', '.pxr', '.pbm', '.pgm', '.ppm', '.pnm', '.psd', '.bw', '.rgb', '.rgba', '.sgi', '.ras', '.tga', '.icb', '.vda', '.vst', '.webp', '.wmf', '.emf', '.xbm', '.xpm', '.BLP', '.BMP', '.DIB', '.BUFR', '.CUR', '.PCX', '.DCX', '.DDS', '.PS', '.EPS', '.FIT', '.FITS', '.FLI', '.FLC', '.FTC', '.FTU', '.GBR', '.GIF', '.GRIB', '.H5', '.HDF', '.PNG', '.APNG', '.JP2', '.J2K', '.JPC', '.JPF', '.JPX', '.J2C', '.ICNS', '.ICO', '.IM', '.IIM', '.TIF', '.TIFF', '.JFIF', '.JPE', '.JPG', '.JPEG', '.MPG', '.MPEG', '.MSP', '.PCD', '.PXR', '.PBM', '.PGM', '.PPM', '.PNM', '.PSD', '.BW', '.RGB', '.RGBA', '.SGI', '.RAS', '.TGA', '.ICB', '.VDA', '.VST', '.WEBP', '.WMF', '.EMF', '.XBM', '.XPM', '.aiff', '.au', '.avr', '.caf', '.flac', '.htk', '.svx', '.mat4', '.mat5', '.mpc2k', '.ogg', '.paf', '.pvf', '.raw', '.rf64', '.sd2', '.sds', '.ircam', '.voc', '.w64', '.wav', '.nist', '.wavex', '.wve', '.xi', '.mp3', '.opus', '.AIFF', '.AU', '.AVR', '.CAF', '.FLAC', '.HTK', '.SVX', '.MAT4', '.MAT5', '.MPC2K', '.OGG', '.PAF', '.PVF', '.RAW', '.RF64', '.SD2', '.SDS', '.IRCAM', '.VOC', '.W64', '.WAV', '.NIST', '.WAVEX', '.WVE', '.XI', '.MP3', '.OPUS', '.zip']```
### Steps to reproduce the bug
```python
from datasets import load_dataset

datasets = load_dataset('winogrande', 'winogrande_xl')
```
### Expected behavior
```
Downloading data: 100%|██████████| 2.06M/2.06M [00:00<00:00, 5.16MB/s]
Downloading data: 100%|██████████| 118k/118k [00:00<00:00, 360kB/s]
Downloading data: 100%|██████████| 85.9k/85.9k [00:00<00:00, 242kB/s]
Generating train split: 100%|██████████| 40398/40398 [00:00<00:00, 845491.12 examples/s]
Generating test split: 100%|██████████| 1767/1767 [00:00<00:00, 362501.11 examples/s]
Generating validation split: 100%|██████████| 1267/1267 [00:00<00:00, 318768.11 examples/s]
```
### Environment info
datasets version: 1.18.0
| {
"avatar_url": "https://avatars.githubusercontent.com/u/7878204?v=4",
"events_url": "https://api.github.com/users/spliew/events{/privacy}",
"followers_url": "https://api.github.com/users/spliew/followers",
"following_url": "https://api.github.com/users/spliew/following{/other_user}",
"gists_url": "https://api.github.com/users/spliew/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/spliew",
"id": 7878204,
"login": "spliew",
"node_id": "MDQ6VXNlcjc4NzgyMDQ=",
"organizations_url": "https://api.github.com/users/spliew/orgs",
"received_events_url": "https://api.github.com/users/spliew/received_events",
"repos_url": "https://api.github.com/users/spliew/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/spliew/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/spliew/subscriptions",
"type": "User",
"url": "https://api.github.com/users/spliew",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6824/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6824/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6823 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6823/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6823/comments | https://api.github.com/repos/huggingface/datasets/issues/6823/events | https://github.com/huggingface/datasets/issues/6823 | 2,250,775,569 | I_kwDODunzps6GKBwR | 6,823 | Loading problems of Datasets with a single shard | {
"avatar_url": "https://avatars.githubusercontent.com/u/60151338?v=4",
"events_url": "https://api.github.com/users/andjoer/events{/privacy}",
"followers_url": "https://api.github.com/users/andjoer/followers",
"following_url": "https://api.github.com/users/andjoer/following{/other_user}",
"gists_url": "https://api.github.com/users/andjoer/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/andjoer",
"id": 60151338,
"login": "andjoer",
"node_id": "MDQ6VXNlcjYwMTUxMzM4",
"organizations_url": "https://api.github.com/users/andjoer/orgs",
"received_events_url": "https://api.github.com/users/andjoer/received_events",
"repos_url": "https://api.github.com/users/andjoer/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/andjoer/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/andjoer/subscriptions",
"type": "User",
"url": "https://api.github.com/users/andjoer",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 2 | 2024-04-18T13:59:00Z | 2024-11-25T05:40:09Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
When a dataset is saved to disk and it has only a single shard, it is not loaded the same way as when it is saved in multiple shards. I installed the latest version of datasets via pip.
### Steps to reproduce the bug
The code below reproduces the behavior. All works well when the range of the loop is 10000 but it fails when it is 1000.
```
from PIL import Image
import numpy as np
from datasets import Dataset, DatasetDict, load_dataset
def load_image():
    # Generate random noise image
    noise = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
    return Image.fromarray(noise)

def create_dataset():
    input_images = []
    output_images = []
    text_prompts = []

    for _ in range(10000):  # this is the problematic parameter
        input_images.append(load_image())
        output_images.append(load_image())
        text_prompts.append('test prompt')

    data = {'input_image': input_images, 'output_image': output_images, 'text_prompt': text_prompts}
    dataset = Dataset.from_dict(data)
    return DatasetDict({'train': dataset})
dataset = create_dataset()
print('dataset before saving')
print(dataset)
print(dataset['train'].column_names)
dataset.save_to_disk('test_ds')
print('dataset after loading')
dataset_loaded = load_dataset('test_ds')
print(dataset_loaded)
print(dataset_loaded['train'].column_names)
```
The output for 1000 iterations is:
```
dataset before saving
DatasetDict({
train: Dataset({
features: ['input_image', 'output_image', 'text_prompt'],
num_rows: 1000
})
})
['input_image', 'output_image', 'text_prompt']
Saving the dataset (1/1 shards): 100%|██████████| 1000/1000 [00:00<00:00, 5156.00 example
dataset after loading
Generating train split: 1 examples [00:00, 230.52 examples/s]
DatasetDict({
train: Dataset({
features: ['_data_files', '_fingerprint', '_format_columns', '_format_kwargs', '_format_type', '_output_all_columns', '_split'],
num_rows: 1
})
})
['_data_files', '_fingerprint', '_format_columns', '_format_kwargs', '_format_type', '_output_all_columns', '_split']
```
For 10000 iterations (8 shards) it is correct:
```
dataset before saving
DatasetDict({
train: Dataset({
features: ['input_image', 'output_image', 'text_prompt'],
num_rows: 10000
})
})
['input_image', 'output_image', 'text_prompt']
Saving the dataset (8/8 shards): 100%|██████████| 10000/10000 [00:01<00:00, 6237.68 examp
dataset after loading
Generating train split: 10000 examples [00:00, 10773.16 examples/s]
DatasetDict({
train: Dataset({
features: ['input_image', 'output_image', 'text_prompt'],
num_rows: 10000
})
})
['input_image', 'output_image', 'text_prompt']
```
### Expected behavior
The procedure should work for a dataset with one shard the same way as for one with multiple shards.
### Environment info
- `datasets` version: 2.18.0
- Platform: macOS-14.1-arm64-arm-64bit
- Python version: 3.11.8
- `huggingface_hub` version: 0.22.2
- PyArrow version: 15.0.2
- Pandas version: 2.2.2
- `fsspec` version: 2024.2.0
Edit: I looked in the source code of load.py in datasets. I should have used "load_from_disk" and it indeed works that way. But ideally load_dataset would have raised an error the same way as if I call a path:
```
if Path(path, config.DATASET_STATE_JSON_FILENAME).exists():
    raise ValueError(
        "You are trying to load a dataset that was saved using `save_to_disk`. "
        "Please use `load_from_disk` instead."
    )
```
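For reference, the call that does work on a `save_to_disk` folder, reusing the `test_ds` path from the repro above:
```python
from datasets import load_from_disk

dataset_loaded = load_from_disk("test_ds")
print(dataset_loaded["train"].column_names)
# expected: ['input_image', 'output_image', 'text_prompt']
```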
Nevertheless, I find it interesting that `load_dataset` works just fine and without a warning if there are multiple shards. | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6823/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6823/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6822 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6822/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6822/comments | https://api.github.com/repos/huggingface/datasets/issues/6822/events | https://github.com/huggingface/datasets/pull/6822 | 2,250,316,258 | PR_kwDODunzps5tB8aD | 6,822 | Fix parquet export infos | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-18T10:21:41Z | 2024-04-18T11:15:41Z | 2024-04-18T11:09:13Z | MEMBER | null | null | null | Don't use the parquet export infos when USE_PARQUET_EXPORT is False.
Otherwise the `datasets-server` might reuse erroneous data when re-running a job
this follows https://github.com/huggingface/datasets/pull/6714 | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6822/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6822/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6822.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6822",
"merged_at": "2024-04-18T11:09:13Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6822.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6822"
} |
https://api.github.com/repos/huggingface/datasets/issues/6820 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6820/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6820/comments | https://api.github.com/repos/huggingface/datasets/issues/6820/events | https://github.com/huggingface/datasets/pull/6820 | 2,248,471,673 | PR_kwDODunzps5s7sgy | 6,820 | Allow deleting a subset/config from a no-script dataset | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 6 | 2024-04-17T14:41:12Z | 2024-05-02T07:31:03Z | 2024-04-30T09:44:24Z | MEMBER | null | null | null | TODO:
- [x] Add docs
- [x] Delete token arg from CLI example
- See: #6839
Close #6810. | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6820/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6820/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6820.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6820",
"merged_at": "2024-04-30T09:44:24Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6820.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6820"
} |
https://api.github.com/repos/huggingface/datasets/issues/6819 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6819/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6819/comments | https://api.github.com/repos/huggingface/datasets/issues/6819/events | https://github.com/huggingface/datasets/issues/6819 | 2,248,043,797 | I_kwDODunzps6F_m0V | 6,819 | Give more details in `DataFilesNotFoundError` when getting the config names | {
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/severo",
"id": 1676121,
"login": "severo",
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"organizations_url": "https://api.github.com/users/severo/orgs",
"received_events_url": "https://api.github.com/users/severo/received_events",
"repos_url": "https://api.github.com/users/severo/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"type": "User",
"url": "https://api.github.com/users/severo",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | open | false | null | [] | null | 0 | 2024-04-17T11:19:47Z | 2024-04-17T11:19:47Z | null | COLLABORATOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Feature request
After https://huggingface.co/datasets/cis-lmu/Glot500/commit/39060e01272ff228cc0ce1d31ae53789cacae8c3, the dataset viewer gives the following error:
```
{
"error": "Cannot get the config names for the dataset.",
"cause_exception": "DataFilesNotFoundError",
"cause_message": "No (supported) data files found in cis-lmu/Glot500",
"cause_traceback": [
"Traceback (most recent call last):\n",
" File \"/src/services/worker/src/worker/job_runners/dataset/config_names.py\", line 73, in compute_config_names_response\n config_names = get_dataset_config_names(\n",
" File \"/src/services/worker/.venv/lib/python3.9/site-packages/datasets/inspect.py\", line 347, in get_dataset_config_names\n dataset_module = dataset_module_factory(\n",
" File \"/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py\", line 1873, in dataset_module_factory\n raise e1 from None\n",
" File \"/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py\", line 1854, in dataset_module_factory\n return HubDatasetModuleFactoryWithoutScript(\n",
" File \"/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py\", line 1245, in get_module\n module_name, default_builder_kwargs = infer_module_for_data_files(\n",
" File \"/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py\", line 595, in infer_module_for_data_files\n raise DataFilesNotFoundError(\"No (supported) data files found\" + (f\" in {path}\" if path else \"\"))\n",
"datasets.exceptions.DataFilesNotFoundError: No (supported) data files found in cis-lmu/Glot500\n"
]
}
```
because the deleted files were still listed in the README, see https://huggingface.co/datasets/cis-lmu/Glot500/discussions/4
Ideally, the error message would include the name of the first configuration with missing files, to help the user understand how to fix it. Here, it would say that configuration `aze_Ethi` has no supported data files, instead of saying that the `cis-lmu/Glot500` *dataset* has no supported data files (which is not true).
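For reference, a rough user-side sketch of how one might narrow down the offending configuration today (this assumes the card's YAML lists its configurations under `configs`; the repo id and field names below are taken from this example and are not part of the proposal):
```python
# Rough user-side sketch (an assumption, not part of this proposal): try building
# each configuration listed in the card's YAML to find the one without data files.
from datasets import load_dataset_builder
from huggingface_hub import DatasetCard

repo_id = "cis-lmu/Glot500"
configs = DatasetCard.load(repo_id).data.to_dict().get("configs", [])
for cfg in configs:
    try:
        load_dataset_builder(repo_id, name=cfg["config_name"])
    except Exception as err:  # e.g. DataFilesNotFoundError for the broken config
        print(f"{cfg['config_name']}: {err}")
```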
### Motivation
Giving more detail in the error would help Datasets Hub users debug why the dataset viewer does not work.
### Your contribution
Not sure how best to fix this, as there are a lot of loops over the dataset configs in the traceback methods. Maybe it would be easier to handle if the code completely isolated each config. | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6819/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6819/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6817 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6817/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6817/comments | https://api.github.com/repos/huggingface/datasets/issues/6817/events | https://github.com/huggingface/datasets/pull/6817 | 2,246,578,480 | PR_kwDODunzps5s1RAN | 6,817 | Support indexable objects in `Dataset.__getitem__` | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-16T17:41:27Z | 2024-04-16T18:27:44Z | 2024-04-16T18:17:29Z | COLLABORATOR | null | null | null | As discussed in https://github.com/huggingface/datasets/pull/6816, this is needed to support objects that implement `__index__` such as `np.int64` in `Dataset.__getitem__`. | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6817/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6817/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6817.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6817",
"merged_at": "2024-04-16T18:17:29Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6817.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6817"
} |
https://api.github.com/repos/huggingface/datasets/issues/6816 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6816/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6816/comments | https://api.github.com/repos/huggingface/datasets/issues/6816/events | https://github.com/huggingface/datasets/pull/6816 | 2,246,264,911 | PR_kwDODunzps5s0MYO | 6,816 | Improve typing of Dataset.search, matching definition | {
"avatar_url": "https://avatars.githubusercontent.com/u/8976546?v=4",
"events_url": "https://api.github.com/users/Dref360/events{/privacy}",
"followers_url": "https://api.github.com/users/Dref360/followers",
"following_url": "https://api.github.com/users/Dref360/following{/other_user}",
"gists_url": "https://api.github.com/users/Dref360/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Dref360",
"id": 8976546,
"login": "Dref360",
"node_id": "MDQ6VXNlcjg5NzY1NDY=",
"organizations_url": "https://api.github.com/users/Dref360/orgs",
"received_events_url": "https://api.github.com/users/Dref360/received_events",
"repos_url": "https://api.github.com/users/Dref360/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Dref360/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Dref360/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Dref360",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-16T14:53:39Z | 2024-04-16T15:54:10Z | 2024-04-16T15:54:10Z | CONTRIBUTOR | null | null | null | Previously, the output of `score, indices = Dataset.search(...)` would be numpy arrays.
The definition in `SearchResult` is a `List[int]`, so this PR now matches the expected type.
The previous behavior is a bit annoying, as `Dataset.__getitem__` doesn't support `numpy.int64`, which forced me to convert `indices` to `int`, e.g.:
```python
score, indices = ds.search(...)
item = ds[int(indices[0])]
``` | {
"avatar_url": "https://avatars.githubusercontent.com/u/8976546?v=4",
"events_url": "https://api.github.com/users/Dref360/events{/privacy}",
"followers_url": "https://api.github.com/users/Dref360/followers",
"following_url": "https://api.github.com/users/Dref360/following{/other_user}",
"gists_url": "https://api.github.com/users/Dref360/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Dref360",
"id": 8976546,
"login": "Dref360",
"node_id": "MDQ6VXNlcjg5NzY1NDY=",
"organizations_url": "https://api.github.com/users/Dref360/orgs",
"received_events_url": "https://api.github.com/users/Dref360/received_events",
"repos_url": "https://api.github.com/users/Dref360/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Dref360/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Dref360/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Dref360",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6816/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6816/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6816.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6816",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6816.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6816"
} |
https://api.github.com/repos/huggingface/datasets/issues/6815 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6815/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6815/comments | https://api.github.com/repos/huggingface/datasets/issues/6815/events | https://github.com/huggingface/datasets/pull/6815 | 2,246,197,070 | PR_kwDODunzps5sz9eC | 6,815 | Remove `os.path.relpath` in `resolve_patterns` | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-16T14:23:13Z | 2024-04-16T16:06:48Z | 2024-04-16T15:58:22Z | COLLABORATOR | null | null | null | ... to save a few seconds when resolving repos with many data files. | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6815/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6815/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6815.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6815",
"merged_at": "2024-04-16T15:58:22Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6815.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6815"
} |
https://api.github.com/repos/huggingface/datasets/issues/6814 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6814/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6814/comments | https://api.github.com/repos/huggingface/datasets/issues/6814/events | https://github.com/huggingface/datasets/issues/6814 | 2,245,857,902 | I_kwDODunzps6F3RJu | 6,814 | `map` with `num_proc` > 1 leads to OOM | {
"avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4",
"events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}",
"followers_url": "https://api.github.com/users/bhavitvyamalik/followers",
"following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}",
"gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/bhavitvyamalik",
"id": 19718818,
"login": "bhavitvyamalik",
"node_id": "MDQ6VXNlcjE5NzE4ODE4",
"organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs",
"received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events",
"repos_url": "https://api.github.com/users/bhavitvyamalik/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions",
"type": "User",
"url": "https://api.github.com/users/bhavitvyamalik",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 1 | 2024-04-16T11:56:03Z | 2024-04-19T11:53:41Z | null | CONTRIBUTOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
When running `map` on a Parquet dataset loaded from the local machine, the RAM usage increases linearly, eventually leading to OOM. I was wondering if I should save the `cache_file` after every n steps in order to prevent this?
### Steps to reproduce the bug
```
ds = load_dataset("parquet", data_files=dataset_path, split="train")
ds = ds.shard(num_shards=4, index=0)
ds = ds.cast_column("audio", datasets.features.Audio(sampling_rate=16_000))
ds = ds.map(prepare_dataset,
            num_proc=32,
            writer_batch_size=1000,
            keep_in_memory=False,
            desc="preprocess dataset")
```
```
def prepare_dataset(batch):
    # load audio
    sample = batch["audio"]
    inputs = feature_extractor(sample["array"], sampling_rate=16000)
    batch["input_values"] = inputs.input_values[0]
    batch["input_length"] = len(sample["array"].squeeze())
    return batch
```
### Expected behavior
It shouldn't run into an OOM problem.
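A rough sketch of the "save the cache file after every n steps" idea mentioned above, in case it helps (the shard count and `map` arguments are illustrative assumptions, not a confirmed fix; it reuses `ds` and `prepare_dataset` from the snippet above):
```python
# Rough sketch of the "save every n steps" idea (illustrative values, not a confirmed fix):
# map one shard at a time and persist it to disk before processing the next one.
import datasets

num_shards = 32
shard_paths = []
for index in range(num_shards):
    shard = ds.shard(num_shards=num_shards, index=index, contiguous=True)
    shard = shard.map(
        prepare_dataset,
        num_proc=4,
        writer_batch_size=100,
        desc=f"preprocess shard {index}",
    )
    path = f"processed/shard_{index}"
    shard.save_to_disk(path)
    shard_paths.append(path)

ds_processed = datasets.concatenate_datasets(
    [datasets.load_from_disk(path) for path in shard_paths]
)
```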
### Environment info
- `datasets` version: 2.18.0
- Platform: Linux-5.4.0-91-generic-x86_64-with-glibc2.17
- Python version: 3.8.19
- `huggingface_hub` version: 0.22.2
- PyArrow version: 15.0.2
- Pandas version: 2.0.3
- `fsspec` version: 2024.2.0 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6814/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6814/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6813 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6813/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6813/comments | https://api.github.com/repos/huggingface/datasets/issues/6813/events | https://github.com/huggingface/datasets/pull/6813 | 2,245,626,870 | PR_kwDODunzps5sx-9V | 6,813 | Add Dataset.take and Dataset.skip | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-16T09:53:42Z | 2024-04-16T14:12:14Z | 2024-04-16T14:06:07Z | MEMBER | null | null | null | ...to be aligned with IterableDataset.take and IterableDataset.skip | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6813/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6813/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6813.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6813",
"merged_at": "2024-04-16T14:06:07Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6813.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6813"
} |
https://api.github.com/repos/huggingface/datasets/issues/6812 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6812/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6812/comments | https://api.github.com/repos/huggingface/datasets/issues/6812/events | https://github.com/huggingface/datasets/pull/6812 | 2,244,898,824 | PR_kwDODunzps5svgoq | 6,812 | Run CI | {
"avatar_url": "https://avatars.githubusercontent.com/u/1309177?v=4",
"events_url": "https://api.github.com/users/charliermarsh/events{/privacy}",
"followers_url": "https://api.github.com/users/charliermarsh/followers",
"following_url": "https://api.github.com/users/charliermarsh/following{/other_user}",
"gists_url": "https://api.github.com/users/charliermarsh/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/charliermarsh",
"id": 1309177,
"login": "charliermarsh",
"node_id": "MDQ6VXNlcjEzMDkxNzc=",
"organizations_url": "https://api.github.com/users/charliermarsh/orgs",
"received_events_url": "https://api.github.com/users/charliermarsh/received_events",
"repos_url": "https://api.github.com/users/charliermarsh/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/charliermarsh/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/charliermarsh/subscriptions",
"type": "User",
"url": "https://api.github.com/users/charliermarsh",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 1 | 2024-04-16T01:12:36Z | 2024-04-16T01:14:16Z | 2024-04-16T01:12:41Z | NONE | null | null | null | null | {
"avatar_url": "https://avatars.githubusercontent.com/u/1309177?v=4",
"events_url": "https://api.github.com/users/charliermarsh/events{/privacy}",
"followers_url": "https://api.github.com/users/charliermarsh/followers",
"following_url": "https://api.github.com/users/charliermarsh/following{/other_user}",
"gists_url": "https://api.github.com/users/charliermarsh/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/charliermarsh",
"id": 1309177,
"login": "charliermarsh",
"node_id": "MDQ6VXNlcjEzMDkxNzc=",
"organizations_url": "https://api.github.com/users/charliermarsh/orgs",
"received_events_url": "https://api.github.com/users/charliermarsh/received_events",
"repos_url": "https://api.github.com/users/charliermarsh/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/charliermarsh/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/charliermarsh/subscriptions",
"type": "User",
"url": "https://api.github.com/users/charliermarsh",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6812/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6812/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6812.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6812",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6812.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6812"
} |
https://api.github.com/repos/huggingface/datasets/issues/6811 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6811/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6811/comments | https://api.github.com/repos/huggingface/datasets/issues/6811/events | https://github.com/huggingface/datasets/pull/6811 | 2,243,656,096 | PR_kwDODunzps5srOtR | 6,811 | add allow_primitive_to_str and allow_decimal_to_str instead of allow_number_to_str | {
"avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4",
"events_url": "https://api.github.com/users/Modexus/events{/privacy}",
"followers_url": "https://api.github.com/users/Modexus/followers",
"following_url": "https://api.github.com/users/Modexus/following{/other_user}",
"gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Modexus",
"id": 37351874,
"login": "Modexus",
"node_id": "MDQ6VXNlcjM3MzUxODc0",
"organizations_url": "https://api.github.com/users/Modexus/orgs",
"received_events_url": "https://api.github.com/users/Modexus/received_events",
"repos_url": "https://api.github.com/users/Modexus/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Modexus/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Modexus",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 6 | 2024-04-15T13:14:38Z | 2024-07-03T14:59:42Z | 2024-04-16T17:03:17Z | CONTRIBUTOR | null | null | null | Fix #6805 | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6811/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6811/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6811.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6811",
"merged_at": "2024-04-16T17:03:17Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6811.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6811"
} |
https://api.github.com/repos/huggingface/datasets/issues/6810 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6810/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6810/comments | https://api.github.com/repos/huggingface/datasets/issues/6810/events | https://github.com/huggingface/datasets/issues/6810 | 2,242,968,745 | I_kwDODunzps6FsPyp | 6,810 | Allow deleting a subset/config from a no-script dataset | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | closed | false | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [
{
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
}
] | null | 3 | 2024-04-15T07:53:26Z | 2025-01-11T18:40:40Z | 2024-04-30T09:44:25Z | MEMBER | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | As proposed by @BramVanroy, it would be neat to have this functionality through the API. | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6810/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6810/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6809 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6809/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6809/comments | https://api.github.com/repos/huggingface/datasets/issues/6809/events | https://github.com/huggingface/datasets/pull/6809 | 2,242,956,297 | PR_kwDODunzps5so0e2 | 6,809 | Make convert_to_parquet CLI command create script branch | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-15T07:47:26Z | 2024-04-17T08:44:26Z | 2024-04-17T08:38:18Z | MEMBER | null | null | null | Make convert_to_parquet CLI command create a "script" branch and keep the script file on it.
This PR proposes the simplest UX approach: whenever `--revision` is not explicitly passed (i.e., when the script is in the main branch), try to create a "script" branch from the "main" branch; if the "script" branch exists already, then do nothing.
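For illustration, the branch-creation step described above could look roughly like this with `huggingface_hub` (a sketch only, not the actual code in this PR; the repo id is a placeholder):
```python
# Illustration only (not the actual implementation in this PR): create the "script"
# branch if it does not exist yet; the repo id below is a placeholder.
from huggingface_hub import HfApi

HfApi().create_branch(
    "USER/DATASET",  # placeholder dataset repo id
    branch="script",
    repo_type="dataset",
    exist_ok=True,  # do nothing if the "script" branch already exists
)
```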
Follow-up of:
- #6795
Close #6808.
CC: @severo | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6809/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6809/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6809.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6809",
"merged_at": "2024-04-17T08:38:18Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6809.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6809"
} |
https://api.github.com/repos/huggingface/datasets/issues/6808 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6808/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6808/comments | https://api.github.com/repos/huggingface/datasets/issues/6808/events | https://github.com/huggingface/datasets/issues/6808 | 2,242,843,611 | I_kwDODunzps6FrxPb | 6,808 | Make convert_to_parquet CLI command create script branch | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | closed | false | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [
{
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
}
] | null | 0 | 2024-04-15T06:46:07Z | 2024-04-17T08:38:19Z | 2024-04-17T08:38:19Z | MEMBER | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | As proposed by @severo, maybe we should also add this functionality to the CLI command that converts a script-dataset to Parquet. See: https://github.com/huggingface/datasets/pull/6795#discussion_r1562819168
> When providing support, we sometimes suggest that users store their script in a script branch. What do you think of this alternative to deleting the files? | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6808/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6808/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6806 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6806/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6806/comments | https://api.github.com/repos/huggingface/datasets/issues/6806/events | https://github.com/huggingface/datasets/pull/6806 | 2,239,435,074 | PR_kwDODunzps5sc8Mb | 6,806 | Fix hf-internal-testing/dataset_with_script commit SHA in CI test | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-12T08:47:50Z | 2024-04-12T09:08:23Z | 2024-04-12T09:02:12Z | MEMBER | null | null | null | Fix test using latest commit SHA in hf-internal-testing/dataset_with_script dataset: https://huggingface.co/datasets/hf-internal-testing/dataset_with_script/commits/refs%2Fconvert%2Fparquet
Fix #6796. | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6806/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6806/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6806.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6806",
"merged_at": "2024-04-12T09:02:12Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6806.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6806"
} |
https://api.github.com/repos/huggingface/datasets/issues/6805 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6805/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6805/comments | https://api.github.com/repos/huggingface/datasets/issues/6805/events | https://github.com/huggingface/datasets/issues/6805 | 2,239,034,951 | I_kwDODunzps6FdPZH | 6,805 | Batched mapping of existing string column casts boolean to string | {
"avatar_url": "https://avatars.githubusercontent.com/u/46891489?v=4",
"events_url": "https://api.github.com/users/starmpcc/events{/privacy}",
"followers_url": "https://api.github.com/users/starmpcc/followers",
"following_url": "https://api.github.com/users/starmpcc/following{/other_user}",
"gists_url": "https://api.github.com/users/starmpcc/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/starmpcc",
"id": 46891489,
"login": "starmpcc",
"node_id": "MDQ6VXNlcjQ2ODkxNDg5",
"organizations_url": "https://api.github.com/users/starmpcc/orgs",
"received_events_url": "https://api.github.com/users/starmpcc/received_events",
"repos_url": "https://api.github.com/users/starmpcc/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/starmpcc/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/starmpcc/subscriptions",
"type": "User",
"url": "https://api.github.com/users/starmpcc",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 7 | 2024-04-12T04:21:41Z | 2024-07-03T15:00:07Z | 2024-07-03T15:00:07Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Let the dataset contain a column named 'a', which is of the string type.
If 'a' is converted to a boolean using batched mapping, the mapper automatically casts the boolean to a string (e.g., True -> 'true').
This only happens when the original column name and the mapped column name are identical.
Thank you!
### Steps to reproduce the bug
```python
from datasets import Dataset
dset = Dataset.from_dict({'a': ['11', '22']})
dset = dset.map(lambda x: {'a': [True for _ in x['a']]}, batched=True)
print(dset['a'])
```
```
> ['true', 'true']
```
### Expected behavior
[True, True]
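A possible workaround sketch, based on the observation above that the cast only happens when the column names match (an assumption, not a confirmed fix):
```python
# Possible workaround sketch (an assumption, not a confirmed fix): remove the original
# string column in the same `map` call so the boolean output keeps its own type.
from datasets import Dataset

dset = Dataset.from_dict({'a': ['11', '22']})
dset = dset.map(
    lambda x: {'a': [True for _ in x['a']]},
    batched=True,
    remove_columns=['a'],  # avoids casting the new values to the existing string feature
)
print(dset['a'])  # expected: [True, True]
```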
### Environment info
- `datasets` version: 2.18.0
- Platform: Linux-5.4.0-148-generic-x86_64-with-glibc2.31
- Python version: 3.10.13
- `huggingface_hub` version: 0.21.4
- PyArrow version: 15.0.2
- Pandas version: 2.2.1
- `fsspec` version: 2023.12.2 | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6805/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6805/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6804 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6804/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6804/comments | https://api.github.com/repos/huggingface/datasets/issues/6804/events | https://github.com/huggingface/datasets/pull/6804 | 2,238,035,124 | PR_kwDODunzps5sYJFF | 6,804 | Fix --repo-type order in cli upload docs | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-11T15:39:09Z | 2024-04-11T16:24:57Z | 2024-04-11T16:18:47Z | MEMBER | null | null | null | null | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6804/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6804/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6804.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6804",
"merged_at": "2024-04-11T16:18:47Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6804.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6804"
} |
https://api.github.com/repos/huggingface/datasets/issues/6803 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6803/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6803/comments | https://api.github.com/repos/huggingface/datasets/issues/6803/events | https://github.com/huggingface/datasets/pull/6803 | 2,237,933,090 | PR_kwDODunzps5sXyct | 6,803 | #6791 Improve type checking around FAISS | {
"avatar_url": "https://avatars.githubusercontent.com/u/8976546?v=4",
"events_url": "https://api.github.com/users/Dref360/events{/privacy}",
"followers_url": "https://api.github.com/users/Dref360/followers",
"following_url": "https://api.github.com/users/Dref360/following{/other_user}",
"gists_url": "https://api.github.com/users/Dref360/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Dref360",
"id": 8976546,
"login": "Dref360",
"node_id": "MDQ6VXNlcjg5NzY1NDY=",
"organizations_url": "https://api.github.com/users/Dref360/orgs",
"received_events_url": "https://api.github.com/users/Dref360/received_events",
"repos_url": "https://api.github.com/users/Dref360/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Dref360/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Dref360/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Dref360",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-11T14:54:30Z | 2024-04-11T15:44:09Z | 2024-04-11T15:38:04Z | CONTRIBUTOR | null | null | null | Fixes #6791
Small PR to raise a better error when a dataset is not embedded properly. | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6803/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6803/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6803.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6803",
"merged_at": "2024-04-11T15:38:04Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6803.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6803"
} |
https://api.github.com/repos/huggingface/datasets/issues/6802 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6802/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6802/comments | https://api.github.com/repos/huggingface/datasets/issues/6802/events | https://github.com/huggingface/datasets/pull/6802 | 2,237,365,489 | PR_kwDODunzps5sV0m8 | 6,802 | Fix typo in docs (upload CLI) | {
"avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4",
"events_url": "https://api.github.com/users/Wauplin/events{/privacy}",
"followers_url": "https://api.github.com/users/Wauplin/followers",
"following_url": "https://api.github.com/users/Wauplin/following{/other_user}",
"gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Wauplin",
"id": 11801849,
"login": "Wauplin",
"node_id": "MDQ6VXNlcjExODAxODQ5",
"organizations_url": "https://api.github.com/users/Wauplin/orgs",
"received_events_url": "https://api.github.com/users/Wauplin/received_events",
"repos_url": "https://api.github.com/users/Wauplin/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Wauplin",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 4 | 2024-04-11T10:05:05Z | 2024-04-11T16:19:00Z | 2024-04-11T13:19:43Z | CONTRIBUTOR | null | null | null | Related to https://huggingface.slack.com/archives/C04RG8YRVB8/p1712643948574129 (internal)
Positional args must be placed before optional args.
Feel free to merge whenever it's ready. | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6802/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6802/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6802.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6802",
"merged_at": "2024-04-11T13:19:43Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6802.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6802"
} |
https://api.github.com/repos/huggingface/datasets/issues/6801 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6801/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6801/comments | https://api.github.com/repos/huggingface/datasets/issues/6801/events | https://github.com/huggingface/datasets/issues/6801 | 2,236,911,556 | I_kwDODunzps6FVI_E | 6,801 | got fileNotFound | {
"avatar_url": "https://avatars.githubusercontent.com/u/93729155?v=4",
"events_url": "https://api.github.com/users/laoniandisko/events{/privacy}",
"followers_url": "https://api.github.com/users/laoniandisko/followers",
"following_url": "https://api.github.com/users/laoniandisko/following{/other_user}",
"gists_url": "https://api.github.com/users/laoniandisko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/laoniandisko",
"id": 93729155,
"login": "laoniandisko",
"node_id": "U_kgDOBZYxgw",
"organizations_url": "https://api.github.com/users/laoniandisko/orgs",
"received_events_url": "https://api.github.com/users/laoniandisko/received_events",
"repos_url": "https://api.github.com/users/laoniandisko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/laoniandisko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/laoniandisko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/laoniandisko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-11T04:57:41Z | 2024-04-12T16:47:43Z | 2024-04-12T16:47:43Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
When I use `load_dataset` to load the nyanko7/danbooru2023 dataset, the cache is read through a symlink. There may be a problem with the `arrow_dataset` initialization process, and I get `FileNotFoundError: [Errno 2] No such file or directory: '2945000.jpg'`.
### Steps to reproduce the bug
The code is shown below:
```python
from datasets import load_dataset

data = load_dataset("nyanko7/danbooru2023", cache_dir=<symlink>)
data["train"][0]
```
### Expected behavior
I should get this result:
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=365x256 at 0x7FB730CB4070>, 'label': 0}
### Environment info
datasets==2.12.0
python==3.10.14
| {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6801/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6801/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6800 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6800/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6800/comments | https://api.github.com/repos/huggingface/datasets/issues/6800/events | https://github.com/huggingface/datasets/issues/6800 | 2,236,431,288 | I_kwDODunzps6FTTu4 | 6,800 | High overhead when loading lots of subsets from the same dataset | {
"avatar_url": "https://avatars.githubusercontent.com/u/53355258?v=4",
"events_url": "https://api.github.com/users/loicmagne/events{/privacy}",
"followers_url": "https://api.github.com/users/loicmagne/followers",
"following_url": "https://api.github.com/users/loicmagne/following{/other_user}",
"gists_url": "https://api.github.com/users/loicmagne/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/loicmagne",
"id": 53355258,
"login": "loicmagne",
"node_id": "MDQ6VXNlcjUzMzU1MjU4",
"organizations_url": "https://api.github.com/users/loicmagne/orgs",
"received_events_url": "https://api.github.com/users/loicmagne/received_events",
"repos_url": "https://api.github.com/users/loicmagne/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/loicmagne/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/loicmagne/subscriptions",
"type": "User",
"url": "https://api.github.com/users/loicmagne",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 6 | 2024-04-10T21:08:57Z | 2024-04-24T13:48:05Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
I have a multilingual dataset that contains a lot of subsets. Each subset corresponds to a pair of languages; you can see an example with 250 subsets here: [loicmagne/open-subtitles-250-bitext-mining](https://hf.co/datasets/loicmagne/open-subtitles-250-bitext-mining). As part of the MTEB benchmark, we may need to load all the subsets of the dataset. The dataset is relatively small and contains only ~45MB of data, but when I try to load every subset, it takes 15 minutes from the HF Hub and 13 minutes from the cache.
This issue https://github.com/huggingface/datasets/issues/5499 also referenced this overhead, but I'm wondering if there is anything I can do to speed up loading different subsets of the same dataset, both when loading from disk and from the HF Hub. Currently each subset is stored in a JSONL file.
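One thing that may help — a hedged sketch, not a verified fix — is issuing the `load_dataset` calls from a thread pool so the per-subset metadata requests overlap. Whether this actually speeds things up (and whether concurrent `load_dataset` calls are safe for a shared cache) is an assumption; the repository name and the shortened subset list are taken from the reproduction snippet below.
```python
# Hedged sketch: overlap per-subset loading with threads; the real speedup
# depends on how much of the overhead is network-bound metadata resolution.
from concurrent.futures import ThreadPoolExecutor
from datasets import load_dataset

subsets = ["ka-ml", "br-sr", "bg-br"]  # in practice, the full list from the snippet below

def load_subset(name):
    # each call hits the Hub (or the local cache) independently
    return name, load_dataset("loicmagne/open-subtitles-250-bitext-mining", name)

with ThreadPoolExecutor(max_workers=8) as pool:
    data_by_subset = dict(pool.map(load_subset, subsets))
```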
### Steps to reproduce the bug
```
from datasets import load_dataset
for subset in ['ka-ml', 'br-sr', 'bg-br', 'kk-lv', 'br-sk', 'br-fi', 'eu-ze_zh', 'kk-nl', 'kk-vi', 'ja-kk', 'br-sv', 'kk-zh_cn', 'kk-ms', 'br-et', 'br-hu', 'eo-kk', 'br-tr', 'ko-tl', 'te-zh_tw', 'br-hr', 'br-nl', 'ka-si', 'br-cs', 'br-is', 'br-ro', 'br-de', 'et-kk', 'fr-hy', 'br-no', 'is-ko', 'br-da', 'br-en', 'eo-lt', 'is-ze_zh', 'eu-ko', 'br-it', 'br-id', 'eu-zh_cn', 'is-ja', 'br-sl', 'br-gl', 'br-pt_br', 'br-es', 'br-pt', 'is-th', 'fa-is', 'br-ca', 'eu-ka', 'is-zh_cn', 'eu-ur', 'id-kk', 'br-sq', 'eu-ja', 'uk-ur', 'is-zh_tw', 'ka-ko', 'eu-zh_tw', 'eu-th', 'eu-is', 'is-tl', 'br-eo', 'eo-ze_zh', 'eu-te', 'ar-kk', 'eo-lv', 'ko-ze_zh', 'ml-ze_zh', 'is-lt', 'br-fr', 'ko-te', 'kk-sl', 'eu-fa', 'eo-ko', 'ka-ze_en', 'eo-eu', 'ta-zh_tw', 'eu-lv', 'ko-lv', 'lt-tl', 'eu-si', 'hy-ru', 'ar-is', 'eu-lt', 'eu-tl', 'eu-uk', 'ka-ze_zh', 'si-ze_zh', 'el-is', 'bn-is', 'ko-ze_en', 'eo-si', 'cs-kk', 'is-uk', 'eu-ze_en', 'ta-ze_zh', 'is-pl', 'is-mk', 'eu-ta', 'ko-lt', 'is-lv', 'fa-ko', 'bn-ko', 'hi-is', 'bn-ze_zh', 'bn-eu', 'bn-ja', 'is-ml', 'eu-ru', 'ko-ta', 'is-vi', 'ja-tl', 'eu-mk', 'eu-he', 'ka-zh_tw', 'ka-zh_cn', 'si-tl', 'is-kk', 'eu-fi', 'fi-ko', 'is-ur', 'ka-th', 'ko-ur', 'eo-ja', 'he-is', 'is-tr', 'ka-ur', 'et-ko', 'eu-vi', 'is-sk', 'gl-is', 'fr-is', 'is-sq', 'hu-is', 'fr-kk', 'eu-sq', 'is-ru', 'ja-ka', 'fi-tl', 'ka-lv', 'fi-is', 'is-si', 'ar-ko', 'ko-sl', 'ar-eu', 'ko-si', 'bg-is', 'eu-hu', 'ko-sv', 'bn-hu', 'kk-ro', 'eu-hi', 'ka-ms', 'ko-th', 'ko-sr', 'ko-mk', 'fi-kk', 'ka-vi', 'eu-ml', 'ko-ml', 'de-ko', 'fa-ze_zh', 'eu-sk', 'is-sl', 'et-is', 'eo-is', 'is-sr', 'is-ze_en', 'kk-pt_br', 'hr-hy', 'kk-pl', 'ja-ta', 'is-ms', 'hi-ze_en', 'is-ro', 'ko-zh_cn', 'el-eu', 'ka-pl', 'ka-sq', 'eu-sl', 'fa-ka', 'ko-no', 'si-ze_en', 'ko-uk', 'ja-ze_zh', 'hu-ko', 'kk-no', 'eu-pl', 'is-pt_br', 'bn-lv', 'tl-zh_cn', 'is-nl', 'he-ko', 'ko-sq', 'ta-th', 'lt-ta', 'da-ko', 'ca-is', 'is-ta', 'bn-fi', 'ja-ml', 'lv-si', 'eu-sv', 'ja-te', 'bn-ur', 'bn-ca', 'bs-ko', 'bs-is', 'eu-sr', 'ko-vi', 'ko-zh_tw', 'et-tl', 'kk-tr', 'eo-vi', 'is-it', 'ja-ko', 'eo-et', 'id-is', 'bn-et', 'bs-eu', 'bn-lt', 'tl-uk', 'bn-zh_tw', 'da-eu', 'el-ko', 'no-tl', 'ko-sk', 'is-pt', 'hu-kk', 'si-zh_tw', 'si-te', 'ka-ru', 'lt-ml', 'af-ja', 'bg-eu', 'eo-th', 'cs-is', 'pl-ze_zh', 'el-kk', 'kk-sv', 'ka-nl', 'ko-pl', 'bg-ko', 'ka-pt_br', 'et-eu', 'tl-zh_tw', 'ka-pt', 'id-ko', 'fi-ze_zh', 'he-kk', 'ka-tr']:
load_dataset('loicmagne/open-subtitles-250-bitext-mining', subset)
```
### Expected behavior
Faster loading?
### Environment info
- `datasets` version: 2.18.0
- Platform: Linux-6.5.0-27-generic-x86_64-with-glibc2.35
- Python version: 3.10.12
- `huggingface_hub` version: 0.22.2
- PyArrow version: 15.0.2
- Pandas version: 2.2.2
- `fsspec` version: 2023.5.0
| null | {
"+1": 1,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6800/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6800/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6799 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6799/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6799/comments | https://api.github.com/repos/huggingface/datasets/issues/6799/events | https://github.com/huggingface/datasets/pull/6799 | 2,236,124,531 | PR_kwDODunzps5sRk_r | 6,799 | fix `DatasetBuilder._split_generators` incomplete type annotation | {
"avatar_url": "https://avatars.githubusercontent.com/u/33965649?v=4",
"events_url": "https://api.github.com/users/JonasLoos/events{/privacy}",
"followers_url": "https://api.github.com/users/JonasLoos/followers",
"following_url": "https://api.github.com/users/JonasLoos/following{/other_user}",
"gists_url": "https://api.github.com/users/JonasLoos/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/JonasLoos",
"id": 33965649,
"login": "JonasLoos",
"node_id": "MDQ6VXNlcjMzOTY1NjQ5",
"organizations_url": "https://api.github.com/users/JonasLoos/orgs",
"received_events_url": "https://api.github.com/users/JonasLoos/received_events",
"repos_url": "https://api.github.com/users/JonasLoos/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/JonasLoos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JonasLoos/subscriptions",
"type": "User",
"url": "https://api.github.com/users/JonasLoos",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-10T17:46:08Z | 2024-04-11T15:41:06Z | 2024-04-11T15:34:58Z | CONTRIBUTOR | null | null | null | solve #6798:
add missing `StreamingDownloadManager` type annotation to the `dl_manager` argument of the `DatasetBuilder._split_generators` function | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6799/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6799/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6799.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6799",
"merged_at": "2024-04-11T15:34:58Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6799.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6799"
} |
https://api.github.com/repos/huggingface/datasets/issues/6798 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6798/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6798/comments | https://api.github.com/repos/huggingface/datasets/issues/6798/events | https://github.com/huggingface/datasets/issues/6798 | 2,235,768,891 | I_kwDODunzps6FQyA7 | 6,798 | `DatasetBuilder._split_generators` incomplete type annotation | {
"avatar_url": "https://avatars.githubusercontent.com/u/33965649?v=4",
"events_url": "https://api.github.com/users/JonasLoos/events{/privacy}",
"followers_url": "https://api.github.com/users/JonasLoos/followers",
"following_url": "https://api.github.com/users/JonasLoos/following{/other_user}",
"gists_url": "https://api.github.com/users/JonasLoos/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/JonasLoos",
"id": 33965649,
"login": "JonasLoos",
"node_id": "MDQ6VXNlcjMzOTY1NjQ5",
"organizations_url": "https://api.github.com/users/JonasLoos/orgs",
"received_events_url": "https://api.github.com/users/JonasLoos/received_events",
"repos_url": "https://api.github.com/users/JonasLoos/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/JonasLoos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JonasLoos/subscriptions",
"type": "User",
"url": "https://api.github.com/users/JonasLoos",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-10T14:38:50Z | 2024-04-11T15:34:59Z | 2024-04-11T15:34:59Z | CONTRIBUTOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
The [`DatasetBuilder._split_generators`](https://github.com/huggingface/datasets/blob/0f27d7b77c73412cfc50b24354bfd7a3e838202f/src/datasets/builder.py#L1449) function has currently the following signature:
```python
class DatasetBuilder:
def _split_generators(self, dl_manager: DownloadManager):
...
```
However, the `dl_manager` argument can also be of type [`StreamingDownloadManager`](https://github.com/huggingface/datasets/blob/0f27d7b77c73412cfc50b24354bfd7a3e838202f/src/datasets/download/streaming_download_manager.py#L962), which has different functionality. For example, the `download` function doesn't download, but rather just returns the given url(s).
I suggest changing the function signature to:
```python
class DatasetBuilder:
def _split_generators(self, dl_manager: Union[DownloadManager, StreamingDownloadManager]):
...
```
and also adjust the docstring accordingly.
I would like to create a Pull Request to fix this, and have the following questions:
* Are there also other options than `DownloadManager`, and `StreamingDownloadManager`?
* Should this also be changed in other functions?
### Steps to reproduce the bug
Minimal example to print the different class names:
```python
import tempfile
from datasets import load_dataset
example = b'''
from datasets import GeneratorBasedBuilder, DatasetInfo, Features, Value, SplitGenerator
class Test(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"x": Value("int64")}))
def _split_generators(self, dl_manager):
print(type(dl_manager))
return [SplitGenerator('test')]
def _generate_examples(self):
yield 0, {'x': 42}
'''
with tempfile.NamedTemporaryFile(suffix='.py') as f:
f.write(example)
f.flush()
load_dataset(f.name, streaming=False)
load_dataset(f.name, streaming=True)
```
### Expected behavior
complete type annotations
### Environment info
/ | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6798/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6798/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6797 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6797/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6797/comments | https://api.github.com/repos/huggingface/datasets/issues/6797/events | https://github.com/huggingface/datasets/pull/6797 | 2,234,890,097 | PR_kwDODunzps5sNYKZ | 6,797 | Fix CI test_load_dataset_distributed_with_script | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-10T06:57:48Z | 2024-04-10T08:25:00Z | 2024-04-10T08:18:01Z | MEMBER | null | null | null | Fix #6796. | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6797/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6797/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6797.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6797",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6797.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6797"
} |
https://api.github.com/repos/huggingface/datasets/issues/6796 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6796/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6796/comments | https://api.github.com/repos/huggingface/datasets/issues/6796/events | https://github.com/huggingface/datasets/issues/6796 | 2,234,887,618 | I_kwDODunzps6FNa3C | 6,796 | CI is broken due to hf-internal-testing/dataset_with_script | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [
{
"color": "d73a4a",
"default": true,
"description": "Something isn't working",
"id": 1935892857,
"name": "bug",
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug"
}
] | closed | false | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [
{
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
}
] | null | 4 | 2024-04-10T06:56:02Z | 2024-04-12T09:02:13Z | 2024-04-12T09:02:13Z | MEMBER | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | CI is broken for test_load_dataset_distributed_with_script. See: https://github.com/huggingface/datasets/actions/runs/8614926216/job/23609378127
```
FAILED tests/test_load.py::test_load_dataset_distributed_with_script[None] - assert False
+ where False = all(<generator object test_load_dataset_distributed_with_script.<locals>.<genexpr> at 0x7f0c741de3b0>)
FAILED tests/test_load.py::test_load_dataset_distributed_with_script[force_redownload] - assert False
+ where False = all(<generator object test_load_dataset_distributed_with_script.<locals>.<genexpr> at 0x7f0be45f6ea0>)
``` | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6796/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6796/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6795 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6795/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6795/comments | https://api.github.com/repos/huggingface/datasets/issues/6795/events | https://github.com/huggingface/datasets/pull/6795 | 2,233,618,719 | PR_kwDODunzps5sJAC8 | 6,795 | Add CLI function to convert script-dataset to Parquet | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-09T14:45:12Z | 2024-04-17T08:41:23Z | 2024-04-12T15:27:04Z | MEMBER | null | null | null | Close #6690. | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6795/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6795/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6795.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6795",
"merged_at": "2024-04-12T15:27:04Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6795.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6795"
} |
https://api.github.com/repos/huggingface/datasets/issues/6794 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6794/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6794/comments | https://api.github.com/repos/huggingface/datasets/issues/6794/events | https://github.com/huggingface/datasets/pull/6794 | 2,233,202,088 | PR_kwDODunzps5sHkJF | 6,794 | Multithreaded downloads | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 4 | 2024-04-09T11:13:19Z | 2024-04-15T21:24:13Z | 2024-04-15T21:18:08Z | MEMBER | null | null | null | ...for faster dataset download when there are many many small files (e.g. imagefolder, audiofolder)
### Benchmark
For example, on [lhoestq/tmp-images-writer_batch_size](https://hf.co/datasets/lhoestq/tmp-images-writer_batch_size) (128 images):
| | duration of the download step in `load_dataset()` |
|--| ----------------------------------------------------------------------|
| Before | 58s |
| Now | 3s |
This should fix issues with the Dataset Viewer taking too much time to show up for imagefolder/audiofolder datasets.
### Implementation details
The main change is in the `DownloadManager`:
```diff
- download_func = partial(self._download, download_config=download_config)
+ download_func = partial(self._download_batched, download_config=download_config)
downloaded_path_or_paths = map_nested(
download_func,
url_or_urls,
map_tuple=True,
num_proc=download_config.num_proc,
desc="Downloading data files",
+ batched=True,
+ batch_size=-1,
)
```
and `_download_batched` is a multithreaded function.
I only enable multithreading if there are more than 16 files and the files are small; otherwise, the progress bar that counts the number of downloaded files is not fluid (it only updates when a big batch of big files finishes downloading). To decide this, I simply check whether the first file is smaller than 20MB.
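For illustration, a minimal sketch of what a batched, size-gated multithreaded download helper could look like — this is not the code from the PR, and `download_single` stands in for whatever per-file download callable is actually used:
```python
# Hedged sketch, not the PR's implementation: thread-pool a batch of downloads,
# but only when there are many small files, so the progress bar stays smooth.
import os
from concurrent.futures import ThreadPoolExecutor

def download_batched(urls, download_single, num_threads=16, small_file_max_bytes=20 << 20):
    if not urls:
        return []
    first = download_single(urls[0])  # download one file to estimate sizes
    if len(urls) <= 16 or os.path.getsize(first) >= small_file_max_bytes:
        return [first] + [download_single(url) for url in urls[1:]]
    with ThreadPoolExecutor(max_workers=num_threads) as pool:
        return [first] + list(pool.map(download_single, urls[1:]))
```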
I also had to tweak `map_nested` to support batching. In particular it slices the data correctly if the user also enables multiprocessing. | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 1,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6794/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6794/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6794.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6794",
"merged_at": "2024-04-15T21:18:08Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6794.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6794"
} |
https://api.github.com/repos/huggingface/datasets/issues/6793 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6793/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6793/comments | https://api.github.com/repos/huggingface/datasets/issues/6793/events | https://github.com/huggingface/datasets/issues/6793 | 2,231,400,200 | I_kwDODunzps6FAHcI | 6,793 | Loading just one particular split is not possible for imagenet-1k | {
"avatar_url": "https://avatars.githubusercontent.com/u/165930106?v=4",
"events_url": "https://api.github.com/users/PaulPSta/events{/privacy}",
"followers_url": "https://api.github.com/users/PaulPSta/followers",
"following_url": "https://api.github.com/users/PaulPSta/following{/other_user}",
"gists_url": "https://api.github.com/users/PaulPSta/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/PaulPSta",
"id": 165930106,
"login": "PaulPSta",
"node_id": "U_kgDOCePkeg",
"organizations_url": "https://api.github.com/users/PaulPSta/orgs",
"received_events_url": "https://api.github.com/users/PaulPSta/received_events",
"repos_url": "https://api.github.com/users/PaulPSta/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/PaulPSta/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/PaulPSta/subscriptions",
"type": "User",
"url": "https://api.github.com/users/PaulPSta",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 2 | 2024-04-08T14:39:14Z | 2025-06-23T09:55:08Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
I'd expect the following code to download just the validation split, but instead I get all the data (train, test and validation splits) on my disk.
```python
from datasets import load_dataset

dataset = load_dataset("imagenet-1k", split="validation", trust_remote_code=True)
```
Is it expected to work like that?
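A hedged workaround sketch (not a confirmed answer): for script-based datasets the split archives may all be fetched up front, so streaming can avoid pulling the other splits to disk, at the cost of sequential access.
```python
# Hedged sketch: stream the validation split instead of materializing it on disk.
from datasets import load_dataset

val_stream = load_dataset(
    "imagenet-1k", split="validation", streaming=True, trust_remote_code=True
)
for example in val_stream.take(5):  # iterate lazily; nothing is fully downloaded up front
    print(example["label"])
```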
### Steps to reproduce the bug
1. Install the required libraries (python, datasets, huggingface_hub)
2. Log in using the Hugging Face CLI
3. Run the code in the description
### Expected behavior
Just a single (validation) split should be downloaded.
### Environment info
python: 3.12.2
datasets: 2.18.0
huggingface_hub: 0.22.2 | null | {
"+1": 5,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 5,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6793/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6793/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6792 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6792/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6792/comments | https://api.github.com/repos/huggingface/datasets/issues/6792/events | https://github.com/huggingface/datasets/pull/6792 | 2,231,318,682 | PR_kwDODunzps5sBEyn | 6,792 | Fix cache conflict in `_check_legacy_cache2` | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-08T14:05:42Z | 2024-04-09T11:34:08Z | 2024-04-09T11:27:58Z | MEMBER | null | null | null | It was reloading from the wrong cache dir because of a bug in `_check_legacy_cache2`. This function should not trigger if there are config_kwargs like `sample_by=`
fix https://github.com/huggingface/datasets/issues/6758 | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6792/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6792/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6792.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6792",
"merged_at": "2024-04-09T11:27:57Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6792.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6792"
} |
https://api.github.com/repos/huggingface/datasets/issues/6791 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6791/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6791/comments | https://api.github.com/repos/huggingface/datasets/issues/6791/events | https://github.com/huggingface/datasets/issues/6791 | 2,230,102,332 | I_kwDODunzps6E7Kk8 | 6,791 | `add_faiss_index` raises ValueError: not enough values to unpack (expected 2, got 1) | {
"avatar_url": "https://avatars.githubusercontent.com/u/40491005?v=4",
"events_url": "https://api.github.com/users/NeuralFlux/events{/privacy}",
"followers_url": "https://api.github.com/users/NeuralFlux/followers",
"following_url": "https://api.github.com/users/NeuralFlux/following{/other_user}",
"gists_url": "https://api.github.com/users/NeuralFlux/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/NeuralFlux",
"id": 40491005,
"login": "NeuralFlux",
"node_id": "MDQ6VXNlcjQwNDkxMDA1",
"organizations_url": "https://api.github.com/users/NeuralFlux/orgs",
"received_events_url": "https://api.github.com/users/NeuralFlux/received_events",
"repos_url": "https://api.github.com/users/NeuralFlux/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/NeuralFlux/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/NeuralFlux/subscriptions",
"type": "User",
"url": "https://api.github.com/users/NeuralFlux",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-08T01:57:03Z | 2024-04-11T15:38:05Z | 2024-04-11T15:38:05Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Calling `add_faiss_index` on a `Dataset` with a column argument raises a `ValueError`. The following is the trace:
```python
214 def replacement_add(self, x):
215 """Adds vectors to the index.
216 The index must be trained before vectors can be added to it.
217 The vectors are implicitly numbered in sequence. When `n` vectors are
(...)
224 `dtype` must be float32.
225 """
--> 227 n, d = x.shape
228 assert d == self.d
229 x = np.ascontiguousarray(x, dtype='float32')
ValueError: not enough values to unpack (expected 2, got 1)
```
### Steps to reproduce the bug
1. Load any dataset like `ds = datasets.load_dataset("wikimedia/wikipedia", "20231101.en")["train"]`
2. Add an FAISS index on any column `ds.add_faiss_index('title')`
### Expected behavior
The index should be created
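For context, a hedged sketch of the embedding step that FAISS indexing expects — the model name and the `sentence-transformers` dependency are illustrative assumptions, and `ds` refers to the dataset loaded in the steps above:
```python
# Hedged sketch: FAISS needs fixed-size float vectors, so embed the text column first.
from sentence_transformers import SentenceTransformer  # illustrative dependency

model = SentenceTransformer("all-MiniLM-L6-v2")
ds_with_emb = ds.map(
    lambda batch: {"embeddings": model.encode(batch["title"])},
    batched=True,
)
ds_with_emb.add_faiss_index(column="embeddings")
```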
### Environment info
- `datasets` version: 2.18.0
- Platform: Linux-6.5.0-26-generic-x86_64-with-glibc2.35
- Python version: 3.9.19
- `huggingface_hub` version: 0.22.2
- PyArrow version: 15.0.2
- Pandas version: 2.2.1
- `fsspec` version: 2024.2.0
- `faiss-cpu` version: 1.8.0 | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6791/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6791/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6790 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6790/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6790/comments | https://api.github.com/repos/huggingface/datasets/issues/6790/events | https://github.com/huggingface/datasets/issues/6790 | 2,229,915,236 | I_kwDODunzps6E6c5k | 6,790 | PyArrow 'Memory mapping file failed: Cannot allocate memory' bug | {
"avatar_url": "https://avatars.githubusercontent.com/u/25725697?v=4",
"events_url": "https://api.github.com/users/lasuomela/events{/privacy}",
"followers_url": "https://api.github.com/users/lasuomela/followers",
"following_url": "https://api.github.com/users/lasuomela/following{/other_user}",
"gists_url": "https://api.github.com/users/lasuomela/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lasuomela",
"id": 25725697,
"login": "lasuomela",
"node_id": "MDQ6VXNlcjI1NzI1Njk3",
"organizations_url": "https://api.github.com/users/lasuomela/orgs",
"received_events_url": "https://api.github.com/users/lasuomela/received_events",
"repos_url": "https://api.github.com/users/lasuomela/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lasuomela/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lasuomela/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lasuomela",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 3 | 2024-04-07T19:25:39Z | 2025-06-12T07:31:44Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Hello,
I've been struggling with a problem using Huggingface datasets caused by PyArrow memory allocation. I finally managed to solve it, and thought to document it since similar issues have been raised here before (https://github.com/huggingface/datasets/issues/5710, https://github.com/huggingface/datasets/issues/6176).
In my case, I was trying to load ~70k dataset files from disk using `datasets.load_from_disk(data_path)` (meaning 70k repeated calls to load_from_disk). This triggered an (uninformative) exception around 64k loaded files:
```
File "pyarrow/io.pxi", line 1053, in pyarrow.lib.memory_map
File "pyarrow/io.pxi", line 1000, in pyarrow.lib.MemoryMappedFile._open
File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
OSError: Memory mapping file failed: Cannot allocate memory
```
This happened despite system RAM usage being very low. After a lot of digging around, I discovered that my Ubuntu machine had a limit on the maximum number of memory-mapped files in `/proc/sys/vm/max_map_count`, set to 65530, which was causing my data loader to crash. Increasing the limit in that file (`echo <new_mmap_size> | sudo tee /proc/sys/vm/max_map_count`) made the issue go away.
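For readers hitting the same wall, a hedged sketch of how the limit can be checked before loading — Linux-specific; the ~70k figure comes from this report, and raising the limit itself still needs root:
```python
# Hedged sketch: warn early if vm.max_map_count is too low for the number of
# memory-mapped Arrow files about to be opened (Linux only).
def read_max_map_count(path="/proc/sys/vm/max_map_count"):
    with open(path) as f:
        return int(f.read().strip())

needed = 70_000  # roughly one mapping per loaded dataset file in this report
current = read_max_map_count()
if current < needed:
    print(
        f"vm.max_map_count={current} < {needed}; consider e.g. "
        "`echo 262144 | sudo tee /proc/sys/vm/max_map_count`"
    )
```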
While this isn't a bug as such in either Datasets or PyArrow, this behavior can be very confusing to users. Maybe this should be mentioned in the documentation? I suspect the other issues raised here about memory-mapping OOM errors could actually be a consequence of system configuration.
Br,
Lauri
### Steps to reproduce the bug
```
import numpy as np
import pyarrow as pa
import tqdm
# Write some data to disk
arr = pa.array(np.arange(100))
schema = pa.schema([
pa.field('nums', arr.type)
])
with pa.OSFile('arraydata.arrow', 'wb') as sink:
with pa.ipc.new_file(sink, schema=schema) as writer:
batch = pa.record_batch([arr], schema=schema)
writer.write(batch)
# Number of times to open the memory map
nums = 70000
# Read the data back
arrays = [pa.memory_map('arraydata.arrow', 'r') for _ in tqdm.tqdm(range(nums))]
```
### Expected behavior
No errors.
### Environment info
datasets: 2.18.0
pyarrow: 15.0.0 | null | {
"+1": 3,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 3,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6790/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6790/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6789 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6789/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6789/comments | https://api.github.com/repos/huggingface/datasets/issues/6789/events | https://github.com/huggingface/datasets/issues/6789 | 2,229,527,001 | I_kwDODunzps6E4-HZ | 6,789 | Issue with map | {
"avatar_url": "https://avatars.githubusercontent.com/u/102672238?v=4",
"events_url": "https://api.github.com/users/Nsohko/events{/privacy}",
"followers_url": "https://api.github.com/users/Nsohko/followers",
"following_url": "https://api.github.com/users/Nsohko/following{/other_user}",
"gists_url": "https://api.github.com/users/Nsohko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Nsohko",
"id": 102672238,
"login": "Nsohko",
"node_id": "U_kgDOBh6nbg",
"organizations_url": "https://api.github.com/users/Nsohko/orgs",
"received_events_url": "https://api.github.com/users/Nsohko/received_events",
"repos_url": "https://api.github.com/users/Nsohko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Nsohko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Nsohko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Nsohko",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 8 | 2024-04-07T02:52:06Z | 2024-07-23T12:41:38Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Map has been taking extremely long to preprocess my data.
It seems to process 1000 examples (which it does really fast, in about 10 seconds), then it hangs for a good 1-2 minutes before it moves on to the next batch of 1000 examples.
It also keeps eating up my hard drive space for some reason by creating a file named tmp1335llua that is over 300GB.
Trying to set `num_proc` to a value greater than 1 also gives me the following error: `NameError: name 'processor' is not defined`.
Please advise on how I could optimise this.
### Steps to reproduce the bug
In general, I have been using map as per normal. Here is a snippet of my code:
````
########################### DATASET LOADING AND PREP #########################
# NOTE: `args`, `processor`, `normalizer`, `do_lower_case` and
# `do_remove_punctuation` are defined elsewhere in the script.
from datasets import load_from_disk, concatenate_datasets, DatasetDict, Audio

def load_custom_dataset(split):
    ds = []
    if split == 'train':
        for dset in args.train_datasets:
            ds.append(load_from_disk(dset))
    if split == 'test':
        for dset in args.test_datasets:
            ds.append(load_from_disk(dset))

    ds_to_return = concatenate_datasets(ds)
    ds_to_return = ds_to_return.shuffle(seed=22)
    return ds_to_return

def prepare_dataset(batch):
    # load and (possibly) resample audio data to 16kHz
    audio = batch["audio"]

    # compute log-Mel input features from input audio array
    batch["input_features"] = processor.feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0]
    # compute input length of audio sample in seconds
    batch["input_length"] = len(audio["array"]) / audio["sampling_rate"]

    # optional pre-processing steps
    transcription = batch["sentence"]
    if do_lower_case:
        transcription = transcription.lower()
    if do_remove_punctuation:
        transcription = normalizer(transcription).strip()

    # encode target text to label ids
    batch["labels"] = processor.tokenizer(transcription).input_ids
    return batch

print('DATASET PREPARATION IN PROGRESS...')

# case 3: combine_and_shuffle is true, only train provided
# load train datasets
train_set = load_custom_dataset('train')

# split dataset
raw_dataset = DatasetDict()
raw_dataset = train_set.train_test_split(test_size=args.test_size, shuffle=True, seed=42)
raw_dataset = raw_dataset.cast_column("audio", Audio(sampling_rate=args.sampling_rate))

print("Before Map:")
print(raw_dataset)

raw_dataset = raw_dataset.map(prepare_dataset, num_proc=1)

print("After Map:")
print(raw_dataset)
````
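One variation I have been considering is to pass the processor to `prepare_dataset` explicitly via `fn_kwargs` (a standard `map` argument) instead of relying on a module-level global, in the hope that `num_proc > 1` stops raising the `NameError`; the `num_proc` and `writer_batch_size` values below are arbitrary:
```python
def prepare_dataset(batch, processor=None):
    audio = batch["audio"]
    batch["input_features"] = processor.feature_extractor(
        audio["array"], sampling_rate=audio["sampling_rate"]
    ).input_features[0]
    batch["labels"] = processor.tokenizer(batch["sentence"]).input_ids
    return batch

raw_dataset = raw_dataset.map(
    prepare_dataset,
    fn_kwargs={"processor": processor},  # avoids relying on module-level globals
    num_proc=4,                          # arbitrary value
    writer_batch_size=500,               # arbitrary value; how often results are flushed to disk
)
```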
### Expected behavior
Based on the speed at which map is processing examples, I would expect the whole mapping to complete in 5-6 hours.
However, because it hangs every 1000 examples, I instead roughly estimate it would take about 40 hours!
Moreover, I can't even finish the map because it keeps rapidly eating up my hard drive space.
### Environment info
- `datasets` version: 2.18.0
- Platform: Windows-10-10.0.22631-SP0
- Python version: 3.10.14
- `huggingface_hub` version: 0.22.2
- PyArrow version: 15.0.2
- Pandas version: 2.2.1
- `fsspec` version: 2024.2.0 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6789/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6789/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6788 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6788/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6788/comments | https://api.github.com/repos/huggingface/datasets/issues/6788/events | https://github.com/huggingface/datasets/issues/6788 | 2,229,207,521 | I_kwDODunzps6E3wHh | 6,788 | A Question About the Map Function | {
"avatar_url": "https://avatars.githubusercontent.com/u/87431052?v=4",
"events_url": "https://api.github.com/users/lifeprompter/events{/privacy}",
"followers_url": "https://api.github.com/users/lifeprompter/followers",
"following_url": "https://api.github.com/users/lifeprompter/following{/other_user}",
"gists_url": "https://api.github.com/users/lifeprompter/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lifeprompter",
"id": 87431052,
"login": "lifeprompter",
"node_id": "MDQ6VXNlcjg3NDMxMDUy",
"organizations_url": "https://api.github.com/users/lifeprompter/orgs",
"received_events_url": "https://api.github.com/users/lifeprompter/received_events",
"repos_url": "https://api.github.com/users/lifeprompter/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lifeprompter/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lifeprompter/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lifeprompter",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-06T11:45:23Z | 2024-04-11T05:29:35Z | 2024-04-11T05:29:35Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Hello,
I have a question regarding the map function in the Hugging Face datasets.
The situation is as follows: when I load a jsonl file using load_dataset(..., streaming=False) and then use the map function to process it, I specify that the returned example should be of type torch.Tensor. However, I noticed that after applying the map function, the datatype automatically changes to a plain Python list, which leads to errors in my program.
I attempted to use load_dataset(..., streaming=True), and the issue no longer occurs. I'm not entirely clear on why this happens. Could you please provide some insights into this?
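For reference, a minimal self-contained sketch of what I am observing; the final `with_format("torch")` call is just my assumption of how the stored lists can be read back as tensors:
```python
import torch
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})

# map stores its output in Arrow format, so the returned tensors come back as lists
ds = ds.map(lambda example: {"x": torch.tensor(example["x"]) * 2})
print(type(ds[0]["x"]))  # <class 'list'>

# setting the output format to "torch" returns tensors when reading
ds = ds.with_format("torch")
print(type(ds[0]["x"]))  # <class 'torch.Tensor'>
```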
### Steps to reproduce the bug
1. dataset = load_dataset(xxx, streaming=False)
2. dataset = dataset.map(function), where function returns a torch.Tensor.
3. You will find that the data in the dataset is stored as plain Python lists.
### Expected behavior
I expected to receive the data as torch.Tensor.
### Environment info
2.18.0 | {
"avatar_url": "https://avatars.githubusercontent.com/u/87431052?v=4",
"events_url": "https://api.github.com/users/lifeprompter/events{/privacy}",
"followers_url": "https://api.github.com/users/lifeprompter/followers",
"following_url": "https://api.github.com/users/lifeprompter/following{/other_user}",
"gists_url": "https://api.github.com/users/lifeprompter/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lifeprompter",
"id": 87431052,
"login": "lifeprompter",
"node_id": "MDQ6VXNlcjg3NDMxMDUy",
"organizations_url": "https://api.github.com/users/lifeprompter/orgs",
"received_events_url": "https://api.github.com/users/lifeprompter/received_events",
"repos_url": "https://api.github.com/users/lifeprompter/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lifeprompter/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lifeprompter/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lifeprompter",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6788/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6788/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6787 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6787/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6787/comments | https://api.github.com/repos/huggingface/datasets/issues/6787/events | https://github.com/huggingface/datasets/issues/6787 | 2,229,103,264 | I_kwDODunzps6E3Wqg | 6,787 | TimeoutError in map | {
"avatar_url": "https://avatars.githubusercontent.com/u/48146603?v=4",
"events_url": "https://api.github.com/users/Jiaxin-Wen/events{/privacy}",
"followers_url": "https://api.github.com/users/Jiaxin-Wen/followers",
"following_url": "https://api.github.com/users/Jiaxin-Wen/following{/other_user}",
"gists_url": "https://api.github.com/users/Jiaxin-Wen/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Jiaxin-Wen",
"id": 48146603,
"login": "Jiaxin-Wen",
"node_id": "MDQ6VXNlcjQ4MTQ2NjAz",
"organizations_url": "https://api.github.com/users/Jiaxin-Wen/orgs",
"received_events_url": "https://api.github.com/users/Jiaxin-Wen/received_events",
"repos_url": "https://api.github.com/users/Jiaxin-Wen/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Jiaxin-Wen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Jiaxin-Wen/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Jiaxin-Wen",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 7 | 2024-04-06T06:25:39Z | 2024-08-14T02:09:57Z | null | CONTRIBUTOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
```python
from datasets import Dataset

def worker(example):
    while True:
        continue
    example['a'] = 100
    return example

data = Dataset.from_list([{"a": 1}, {"a": 2}])
data = data.map(worker)
print(data[0])
```
I'm implementing a worker function whose runtime will depend on specific examples (e.g., while most examples take 0.01s in worker, several examples may take 50s).
Therefore, I would like to know how the current implementation will handle those subprocesses that require a long (e.g., >= 5min) or even infinite time.
I notice that the current implementation sets a timeout of 0.05 seconds:
https://github.com/huggingface/datasets/blob/c3ddb1ef00334a6f973679a51e783905fbc9ef0b/src/datasets/utils/py_utils.py#L674
However, this example code still gets stuck.
### Steps to reproduce the bug
Run the example above.
### Expected behavior
I want to set a default worker to handle these timeout cases, instead of getting stuck
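To illustrate the kind of behavior I am after, here is a rough sketch of a per-example timeout implemented inside the worker itself (Unix-only, since it relies on `signal.alarm`; the 60-second limit and the `None` fallback are arbitrary choices):
```python
import signal

from datasets import Dataset


class WorkerTimeout(Exception):
    pass


def _on_timeout(signum, frame):
    raise WorkerTimeout


def worker_with_timeout(example, timeout_s=60):
    signal.signal(signal.SIGALRM, _on_timeout)
    signal.alarm(timeout_s)  # deliver SIGALRM after timeout_s seconds
    try:
        while True:  # stands in for the potentially unbounded work
            continue
    except WorkerTimeout:
        example["a"] = None  # fall back to a default value
    finally:
        signal.alarm(0)  # cancel any pending alarm
    return example


data = Dataset.from_list([{"a": 1}, {"a": 2}])
data = data.map(worker_with_timeout)
print(data[0])
```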
### Environment info
main branch version | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6787/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6787/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6786 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6786/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6786/comments | https://api.github.com/repos/huggingface/datasets/issues/6786/events | https://github.com/huggingface/datasets/pull/6786 | 2,228,463,776 | PR_kwDODunzps5r3kWg | 6,786 | Make Image cast storage faster | {
"avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4",
"events_url": "https://api.github.com/users/Modexus/events{/privacy}",
"followers_url": "https://api.github.com/users/Modexus/followers",
"following_url": "https://api.github.com/users/Modexus/following{/other_user}",
"gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Modexus",
"id": 37351874,
"login": "Modexus",
"node_id": "MDQ6VXNlcjM3MzUxODc0",
"organizations_url": "https://api.github.com/users/Modexus/orgs",
"received_events_url": "https://api.github.com/users/Modexus/received_events",
"repos_url": "https://api.github.com/users/Modexus/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Modexus/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Modexus",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 8 | 2024-04-05T17:00:46Z | 2024-10-01T09:09:14Z | null | CONTRIBUTOR | null | null | null | PR for issue #6782.
Makes `cast_storage` of the `Image` class faster by removing the slow call to `.pylist`.
Instead, it directly converts each `ListArray` item to either `Array2DExtensionType` or `Array3DExtensionType`.
This also preserves the `dtype` removing the warning if the array is already `uint8`. | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6786/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6786/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6786.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6786",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6786.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6786"
} |
https://api.github.com/repos/huggingface/datasets/issues/6785 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6785/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6785/comments | https://api.github.com/repos/huggingface/datasets/issues/6785/events | https://github.com/huggingface/datasets/pull/6785 | 2,228,429,852 | PR_kwDODunzps5r3dCw | 6,785 | rename datasets-server to dataset-viewer | {
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/severo",
"id": 1676121,
"login": "severo",
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"organizations_url": "https://api.github.com/users/severo/orgs",
"received_events_url": "https://api.github.com/users/severo/received_events",
"repos_url": "https://api.github.com/users/severo/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"type": "User",
"url": "https://api.github.com/users/severo",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-05T16:37:05Z | 2024-04-08T12:41:13Z | 2024-04-08T12:35:02Z | COLLABORATOR | null | null | null | See https://github.com/huggingface/dataset-viewer/issues/2650
Tell me if it's OK, or if it's a breaking change that must be handled differently.
Also note that the docs page is still https://huggingface.co/docs/datasets-server/, so I didn't change it.
And the API URL is still https://datasets-server.huggingface.co/ (and [might always be](https://github.com/huggingface/dataset-viewer/issues/2666)), so I left it as-is too. | {
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/severo",
"id": 1676121,
"login": "severo",
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"organizations_url": "https://api.github.com/users/severo/orgs",
"received_events_url": "https://api.github.com/users/severo/received_events",
"repos_url": "https://api.github.com/users/severo/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"type": "User",
"url": "https://api.github.com/users/severo",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6785/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6785/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6785.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6785",
"merged_at": "2024-04-08T12:35:02Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6785.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6785"
} |
https://api.github.com/repos/huggingface/datasets/issues/6784 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6784/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6784/comments | https://api.github.com/repos/huggingface/datasets/issues/6784/events | https://github.com/huggingface/datasets/pull/6784 | 2,228,390,504 | PR_kwDODunzps5r3UTj | 6,784 | Extract data on the fly in packaged builders | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-04-05T16:12:25Z | 2024-04-16T16:37:47Z | 2024-04-16T16:31:29Z | COLLABORATOR | null | null | null | Instead of waiting for data files to be extracted in the packaged builders, we can prepend the compression prefix and extract them as they are being read (using `fsspec`). This saves disk space (deleting extracted archives is not set by default) and slightly speeds up dataset generation (less disk reads) | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6784/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6784/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6784.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6784",
"merged_at": "2024-04-16T16:31:29Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6784.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6784"
} |
https://api.github.com/repos/huggingface/datasets/issues/6783 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6783/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6783/comments | https://api.github.com/repos/huggingface/datasets/issues/6783/events | https://github.com/huggingface/datasets/issues/6783 | 2,228,179,466 | I_kwDODunzps6Ez1IK | 6,783 | AttributeError: module 'numpy' has no attribute 'object'. in Kaggle Notebook | {
"avatar_url": "https://avatars.githubusercontent.com/u/26062262?v=4",
"events_url": "https://api.github.com/users/petrov826/events{/privacy}",
"followers_url": "https://api.github.com/users/petrov826/followers",
"following_url": "https://api.github.com/users/petrov826/following{/other_user}",
"gists_url": "https://api.github.com/users/petrov826/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/petrov826",
"id": 26062262,
"login": "petrov826",
"node_id": "MDQ6VXNlcjI2MDYyMjYy",
"organizations_url": "https://api.github.com/users/petrov826/orgs",
"received_events_url": "https://api.github.com/users/petrov826/received_events",
"repos_url": "https://api.github.com/users/petrov826/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/petrov826/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/petrov826/subscriptions",
"type": "User",
"url": "https://api.github.com/users/petrov826",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-05T14:31:48Z | 2024-04-11T17:18:53Z | 2024-04-11T17:18:53Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
# problem
I can't resample an audio dataset in a Kaggle Notebook. It looks like some code in the `datasets` library uses aliases that were deprecated in NumPy 1.20.
## code for resampling
```
from datasets import load_dataset, Audio
from transformers import AutoFeatureExtractor
from transformers import AutoModelForAudioClassification, TrainingArguments, Trainer
dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")

def preprocess_function(examples):
    audio_arrays = [x["array"] for x in examples["audio"]]
    inputs = feature_extractor(
        audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True
    )
    return inputs

dataset = dataset.map(preprocess_function, remove_columns="audio", batched=True, batch_size=100)
```
## the error I got
<details>
<summary>Click to expand</summary>
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In[20], line 1
----> 1 dataset = dataset.map(preprocess_function, remove_columns="audio", batched=True, batch_size=100)
2 dataset
File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:1955, in Dataset.map(self, function, with_indices, with_rank, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc)
1952 disable_tqdm = not logging.is_progress_bar_enabled()
1954 if num_proc is None or num_proc == 1:
-> 1955 return self._map_single(
1956 function=function,
1957 with_indices=with_indices,
1958 with_rank=with_rank,
1959 input_columns=input_columns,
1960 batched=batched,
1961 batch_size=batch_size,
1962 drop_last_batch=drop_last_batch,
1963 remove_columns=remove_columns,
1964 keep_in_memory=keep_in_memory,
1965 load_from_cache_file=load_from_cache_file,
1966 cache_file_name=cache_file_name,
1967 writer_batch_size=writer_batch_size,
1968 features=features,
1969 disable_nullable=disable_nullable,
1970 fn_kwargs=fn_kwargs,
1971 new_fingerprint=new_fingerprint,
1972 disable_tqdm=disable_tqdm,
1973 desc=desc,
1974 )
1975 else:
1977 def format_cache_file_name(cache_file_name, rank):
File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:520, in transmit_tasks.<locals>.wrapper(*args, **kwargs)
518 self: "Dataset" = kwargs.pop("self")
519 # apply actual function
--> 520 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
521 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
522 for dataset in datasets:
523 # Remove task templates if a column mapping of the template is no longer valid
File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:487, in transmit_format.<locals>.wrapper(*args, **kwargs)
480 self_format = {
481 "type": self._format_type,
482 "format_kwargs": self._format_kwargs,
483 "columns": self._format_columns,
484 "output_all_columns": self._output_all_columns,
485 }
486 # apply actual function
--> 487 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
488 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
489 # re-apply format to the output
File /opt/conda/lib/python3.10/site-packages/datasets/fingerprint.py:458, in fingerprint_transform.<locals>._fingerprint.<locals>.wrapper(*args, **kwargs)
452 kwargs[fingerprint_name] = update_fingerprint(
453 self._fingerprint, transform, kwargs_for_fingerprint
454 )
456 # Call actual function
--> 458 out = func(self, *args, **kwargs)
460 # Update fingerprint of in-place transforms + update in-place history of transforms
462 if inplace: # update after calling func so that the fingerprint doesn't change if the function fails
File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:2356, in Dataset._map_single(self, function, with_indices, with_rank, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset, disable_tqdm, desc, cache_only)
2354 writer.write_table(batch)
2355 else:
-> 2356 writer.write_batch(batch)
2357 if update_data and writer is not None:
2358 writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file
File /opt/conda/lib/python3.10/site-packages/datasets/arrow_writer.py:507, in ArrowWriter.write_batch(self, batch_examples, writer_batch_size)
505 col_try_type = try_features[col] if try_features is not None and col in try_features else None
506 typed_sequence = OptimizedTypedSequence(batch_examples[col], type=col_type, try_type=col_try_type, col=col)
--> 507 arrays.append(pa.array(typed_sequence))
508 inferred_features[col] = typed_sequence.get_inferred_type()
509 schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema
File /opt/conda/lib/python3.10/site-packages/pyarrow/array.pxi:236, in pyarrow.lib.array()
File /opt/conda/lib/python3.10/site-packages/pyarrow/array.pxi:110, in pyarrow.lib._handle_arrow_array_protocol()
File /opt/conda/lib/python3.10/site-packages/datasets/arrow_writer.py:184, in TypedSequence.__arrow_array__(self, type)
182 out = numpy_to_pyarrow_listarray(data)
183 elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray):
--> 184 out = list_of_np_array_to_pyarrow_listarray(data)
185 else:
186 trying_cast_to_python_objects = True
File /opt/conda/lib/python3.10/site-packages/datasets/features/features.py:1174, in list_of_np_array_to_pyarrow_listarray(l_arr, type)
1172 """Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
1173 if len(l_arr) > 0:
-> 1174 return list_of_pa_arrays_to_pyarrow_listarray(
1175 [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
1176 )
1177 else:
1178 return pa.array([], type=type)
File /opt/conda/lib/python3.10/site-packages/datasets/features/features.py:1163, in list_of_pa_arrays_to_pyarrow_listarray(l_arr)
1160 null_indices = [i for i, arr in enumerate(l_arr) if arr is None]
1161 l_arr = [arr for arr in l_arr if arr is not None]
1162 offsets = np.cumsum(
-> 1163 [0] + [len(arr) for arr in l_arr], dtype=np.object
1164 ) # convert to dtype object to allow None insertion
1165 offsets = np.insert(offsets, null_indices, None)
1166 offsets = pa.array(offsets, type=pa.int32())
File /opt/conda/lib/python3.10/site-packages/numpy/__init__.py:324, in __getattr__(attr)
319 warnings.warn(
320 f"In the future `np.{attr}` will be defined as the "
321 "corresponding NumPy scalar.", FutureWarning, stacklevel=2)
323 if attr in __former_attrs__:
--> 324 raise AttributeError(__former_attrs__[attr])
326 if attr == 'testing':
327 import numpy.testing as testing
AttributeError: module 'numpy' has no attribute 'object'.
`np.object` was a deprecated alias for the builtin `object`. To avoid this error in existing code, use `object` by itself. Doing this will not modify any behavior and is safe.
The aliases was originally deprecated in NumPy 1.20; for more details and guidance see the original release note at:
https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
```
</details>
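For what it's worth, the failure can be reproduced outside `datasets` as well, so I assume any code path that still uses the removed alias will break on recent NumPy:
```python
import numpy as np

try:
    np.cumsum([0, 1, 2], dtype=np.object)  # AttributeError on NumPy >= 1.24, where the alias was removed
except AttributeError as err:
    print(err)

print(np.cumsum([0, 1, 2], dtype=object))  # the builtin `object` works on all NumPy versions
```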
### Steps to reproduce the bug
Run the above code in a Kaggle Notebook.
### Expected behavior
I can resample audio data without fail.
### Environment info
- `datasets` version: 2.1.0
- Platform: Linux-5.15.133+-x86_64-with-glibc2.31
- Python version: 3.10.13
- PyArrow version: 11.0.0
- Pandas version: 2.2.1 | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6783/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6783/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6782 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6782/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6782/comments | https://api.github.com/repos/huggingface/datasets/issues/6782/events | https://github.com/huggingface/datasets/issues/6782 | 2,228,081,955 | I_kwDODunzps6EzdUj | 6,782 | Image cast_storage very slow for arrays (e.g. numpy, tensors) | {
"avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4",
"events_url": "https://api.github.com/users/Modexus/events{/privacy}",
"followers_url": "https://api.github.com/users/Modexus/followers",
"following_url": "https://api.github.com/users/Modexus/following{/other_user}",
"gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Modexus",
"id": 37351874,
"login": "Modexus",
"node_id": "MDQ6VXNlcjM3MzUxODc0",
"organizations_url": "https://api.github.com/users/Modexus/orgs",
"received_events_url": "https://api.github.com/users/Modexus/received_events",
"repos_url": "https://api.github.com/users/Modexus/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Modexus/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Modexus",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 3 | 2024-04-05T13:46:54Z | 2024-04-10T14:36:13Z | null | CONTRIBUTOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | Update: see comments below
### Describe the bug
Operations that save an image from a path are very slow.
I believe the reason for this is that the image data (`numpy`) is converted into `pyarrow` format and then back to Python using `.pylist()` before being converted to a numpy array again.
`pylist` is already slow, but used on a multi-dimensional numpy array such as an image it takes a very long time.
From the trace below we can see that `__arrow_array__` takes a long time.
It is currently also called in `get_inferred_type`; this should be removable (#6781) but doesn't change the underlying issue.
The conversion to `pyarrow` and back also leads to the `numpy` array having type `int64`, which causes a warning message because the image type expects `uint8`.
However, originally the `numpy` image array was in `uint8`.
### Steps to reproduce the bug
```python
from PIL import Image
import numpy as np
import datasets
import cProfile
image = Image.fromarray(np.random.randint(0, 255, (2048, 2048, 3), dtype=np.uint8))
image.save("test_image.jpg")
ds = datasets.Dataset.from_dict(
    {"image": ["test_image.jpg"]},
    features=datasets.Features({"image": datasets.Image(decode=True)}),
)
# load as numpy array, e.g. for further processing with map
# same result as map returning numpy arrays
ds.set_format("numpy")
cProfile.run("ds.map(writer_batch_size=1, load_from_cache_file=False)", "restats")
```
```bash
Fri Apr 5 14:56:17 2024 restats
66817 function calls (64992 primitive calls) in 33.382 seconds
Ordered by: cumulative time
List reduced from 1073 to 20 due to restriction <20>
ncalls tottime percall cumtime percall filename:lineno(function)
46/1 0.000 0.000 33.382 33.382 {built-in method builtins.exec}
1 0.000 0.000 33.382 33.382 <string>:1(<module>)
1 0.000 0.000 33.382 33.382 arrow_dataset.py:594(wrapper)
1 0.000 0.000 33.382 33.382 arrow_dataset.py:551(wrapper)
1 0.000 0.000 33.379 33.379 arrow_dataset.py:2916(map)
4 0.000 0.000 33.327 8.332 arrow_dataset.py:3277(_map_single)
1 0.000 0.000 33.311 33.311 arrow_writer.py:465(write)
2 0.000 0.000 33.311 16.656 arrow_writer.py:423(write_examples_on_file)
1 0.000 0.000 33.311 33.311 arrow_writer.py:527(write_batch)
2 14.484 7.242 33.260 16.630 arrow_writer.py:161(__arrow_array__)
1 0.001 0.001 16.438 16.438 arrow_writer.py:121(get_inferred_type)
1 0.000 0.000 14.398 14.398 threading.py:637(wait)
1 0.000 0.000 14.398 14.398 threading.py:323(wait)
8 14.398 1.800 14.398 1.800 {method 'acquire' of '_thread.lock' objects}
4/2 0.000 0.000 4.337 2.169 table.py:1800(wrapper)
2 0.000 0.000 4.337 2.169 table.py:1950(cast_array_to_feature)
2 0.475 0.238 4.337 2.169 image.py:209(cast_storage)
9 2.583 0.287 2.583 0.287 {built-in method numpy.array}
2 0.000 0.000 1.284 0.642 image.py:319(encode_np_array)
2 0.000 0.000 1.246 0.623 image.py:301(image_to_bytes)
```
### Expected behavior
The `numpy` image data should be passed through as it will be directly consumed by `pillow` to convert it to bytes.
As an example, one can replace `list_of_np_array_to_pyarrow_listarray(data)` in `__arrow_array__` with just `out = data` as a test.
We would have to change `cast_storage` of the `Image` feature so that it handles the passed-through data (and decide whether to handle the type beforehand):
```python
bytes_array = pa.array(
    [encode_np_array(arr)["bytes"] if arr is not None else None for arr in storage],
    type=pa.binary(),
)
```
Leading to the following:
```bash
Fri Apr 5 15:44:27 2024 restats
66419 function calls (64595 primitive calls) in 0.937 seconds
Ordered by: cumulative time
List reduced from 1023 to 20 due to restriction <20>
ncalls tottime percall cumtime percall filename:lineno(function)
47/1 0.000 0.000 0.935 0.935 {built-in method builtins.exec}
2/1 0.000 0.000 0.935 0.935 <string>:1(<module>)
2/1 0.000 0.000 0.934 0.934 arrow_dataset.py:594(wrapper)
2/1 0.000 0.000 0.934 0.934 arrow_dataset.py:551(wrapper)
2/1 0.000 0.000 0.934 0.934 arrow_dataset.py:2916(map)
4 0.000 0.000 0.933 0.233 arrow_dataset.py:3277(_map_single)
1 0.000 0.000 0.883 0.883 arrow_writer.py:466(write)
2 0.000 0.000 0.883 0.441 arrow_writer.py:424(write_examples_on_file)
1 0.000 0.000 0.882 0.882 arrow_writer.py:528(write_batch)
2 0.000 0.000 0.877 0.439 arrow_writer.py:161(__arrow_array__)
4/2 0.000 0.000 0.877 0.439 table.py:1800(wrapper)
2 0.000 0.000 0.877 0.439 table.py:1950(cast_array_to_feature)
2 0.009 0.005 0.877 0.439 image.py:209(cast_storage)
2 0.000 0.000 0.868 0.434 image.py:335(encode_np_array)
2 0.000 0.000 0.856 0.428 image.py:317(image_to_bytes)
2 0.000 0.000 0.822 0.411 Image.py:2376(save)
2 0.000 0.000 0.822 0.411 PngImagePlugin.py:1233(_save)
2 0.000 0.000 0.822 0.411 ImageFile.py:517(_save)
2 0.000 0.000 0.821 0.411 ImageFile.py:545(_encode_tile)
589 0.803 0.001 0.803 0.001 {method 'encode' of 'ImagingEncoder' objects}
```
This is of course only a test, as it passes through all `numpy` arrays irrespective of whether they should be an image.
Also, I guess `cast_storage` is meant for casting `pyarrow` storage exclusively.
Converting to a `pyarrow` array seems like a good solution as it also handles `pytorch` tensors etc.; maybe there is a more efficient way to create a PIL image from a `pyarrow` array?
Not sure how this should be handled, but I would be happy to help if there is a good solution.
### Environment info
- `datasets` version: 2.18.1.dev0
- Platform: Linux-6.7.11-200.fc39.x86_64-x86_64-with-glibc2.38
- Python version: 3.12.2
- `huggingface_hub` version: 0.22.2
- PyArrow version: 15.0.2
- Pandas version: 2.2.1
- `fsspec` version: 2024.3.1 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6782/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6782/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6781 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6781/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6781/comments | https://api.github.com/repos/huggingface/datasets/issues/6781/events | https://github.com/huggingface/datasets/pull/6781 | 2,228,026,497 | PR_kwDODunzps5r2DMe | 6,781 | Remove get_inferred_type from ArrowWriter write_batch | {
"avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4",
"events_url": "https://api.github.com/users/Modexus/events{/privacy}",
"followers_url": "https://api.github.com/users/Modexus/followers",
"following_url": "https://api.github.com/users/Modexus/following{/other_user}",
"gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Modexus",
"id": 37351874,
"login": "Modexus",
"node_id": "MDQ6VXNlcjM3MzUxODc0",
"organizations_url": "https://api.github.com/users/Modexus/orgs",
"received_events_url": "https://api.github.com/users/Modexus/received_events",
"repos_url": "https://api.github.com/users/Modexus/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Modexus/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Modexus",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-05T13:21:05Z | 2024-04-09T07:49:11Z | 2024-04-09T07:49:11Z | CONTRIBUTOR | null | null | null | Inferring the type seems to be unnecessary given that the pyarrow array has already been created.
Because pyarrow array creation is sometimes extremely slow, this doubles the time `write_batch` takes. | {
"avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4",
"events_url": "https://api.github.com/users/Modexus/events{/privacy}",
"followers_url": "https://api.github.com/users/Modexus/followers",
"following_url": "https://api.github.com/users/Modexus/following{/other_user}",
"gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Modexus",
"id": 37351874,
"login": "Modexus",
"node_id": "MDQ6VXNlcjM3MzUxODc0",
"organizations_url": "https://api.github.com/users/Modexus/orgs",
"received_events_url": "https://api.github.com/users/Modexus/received_events",
"repos_url": "https://api.github.com/users/Modexus/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Modexus/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Modexus",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6781/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6781/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6781.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6781",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6781.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6781"
} |
https://api.github.com/repos/huggingface/datasets/issues/6780 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6780/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6780/comments | https://api.github.com/repos/huggingface/datasets/issues/6780/events | https://github.com/huggingface/datasets/pull/6780 | 2,226,160,096 | PR_kwDODunzps5rvkyj | 6,780 | Fix CI | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-04T17:45:04Z | 2024-04-04T18:46:04Z | 2024-04-04T18:23:34Z | COLLABORATOR | null | null | null | Updates the `wmt_t2t` test to pin the `revision` to the version with a loading script (cc @albertvillanova).
Additionally, it replaces the occurrences of the `lhoestq/test` repo id with `hf-internal-testing/dataset_with_script` and re-enables logging checks in the `Dataset.from_sql` tests. | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 1,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6780/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6780/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6780.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6780",
"merged_at": "2024-04-04T18:23:34Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6780.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6780"
} |
https://api.github.com/repos/huggingface/datasets/issues/6779 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6779/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6779/comments | https://api.github.com/repos/huggingface/datasets/issues/6779/events | https://github.com/huggingface/datasets/pull/6779 | 2,226,075,551 | PR_kwDODunzps5rvSA8 | 6,779 | Install dependencies with `uv` in CI | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-04T17:02:51Z | 2024-04-08T13:34:01Z | 2024-04-08T13:27:44Z | COLLABORATOR | null | null | null | `diffusers` (https://github.com/huggingface/diffusers/pull/7116) and `huggingface_hub` (https://github.com/huggingface/huggingface_hub/pull/2072) also use `uv` to install their dependencies, so we can do the same here.
It seems to make the "Install dependencies" step in the `ubuntu` jobs 5-8x faster and 1.5-2x in the `windows` one.
Besides introducing `uv` in CI, this PR bumps the `tensorflow` minimal version requirement to align with Transformers and simplifies the SpaCy hashing tests (use blank language models instead of the pre-trained ones)
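For context, my understanding of the pattern those repos use is roughly the following (a sketch only, not the exact workflow change in this PR):
```bash
# Install uv itself, then let it resolve and install the project dependencies
pip install --upgrade uv
uv pip install --system ".[tests]"  # "--system" skips the virtualenv requirement; the extras name is an assumption
```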
| {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6779/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6779/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6779.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6779",
"merged_at": "2024-04-08T13:27:43Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6779.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6779"
} |
https://api.github.com/repos/huggingface/datasets/issues/6778 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6778/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6778/comments | https://api.github.com/repos/huggingface/datasets/issues/6778/events | https://github.com/huggingface/datasets/issues/6778 | 2,226,040,636 | I_kwDODunzps6Erq88 | 6,778 | Dataset.to_csv() missing commas in columns with lists | {
"avatar_url": "https://avatars.githubusercontent.com/u/100041276?v=4",
"events_url": "https://api.github.com/users/mpickard-dataprof/events{/privacy}",
"followers_url": "https://api.github.com/users/mpickard-dataprof/followers",
"following_url": "https://api.github.com/users/mpickard-dataprof/following{/other_user}",
"gists_url": "https://api.github.com/users/mpickard-dataprof/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mpickard-dataprof",
"id": 100041276,
"login": "mpickard-dataprof",
"node_id": "U_kgDOBfaCPA",
"organizations_url": "https://api.github.com/users/mpickard-dataprof/orgs",
"received_events_url": "https://api.github.com/users/mpickard-dataprof/received_events",
"repos_url": "https://api.github.com/users/mpickard-dataprof/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mpickard-dataprof/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mpickard-dataprof/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mpickard-dataprof",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 1 | 2024-04-04T16:46:13Z | 2024-04-08T15:24:41Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
The `to_csv()` method does not output commas in list-valued columns. So when the Dataset is loaded back in, the data structure of the column with a list is not correct.
Here's an example (see the code and outputs below).
Obviously, it's not as trivial as inserting commas in the list, since it's a comma-separated file. But hopefully there's a way to export the list in a way that it'll be imported by `load_dataset()` correctly.
### Steps to reproduce the bug
Here's some code to reproduce the bug:
```python
from datasets import Dataset
ds = Dataset.from_dict(
    {
        "pokemon": ["bulbasaur", "squirtle"],
        "type": ["grass", "water"]
    }
)

def ascii_to_hex(text):
    return [ord(c) for c in text]

ds = ds.map(lambda x: {"int": ascii_to_hex(x['pokemon'])})
ds.to_csv('../output/temp.csv')
```
temp.csv then contains the actual output shown below.
### Expected behavior
ACTUAL OUTPUT:
```
pokemon,type,int
bulbasaur,grass,[ 98 117 108 98 97 115 97 117 114]
squirtle,water,[115 113 117 105 114 116 108 101]
```
EXPECTED OUTPUT:
```
pokemon,type,int
bulbasaur,grass,[98, 117, 108, 98, 97, 115, 97, 117, 114]
squirtle,water,[115, 113, 117, 105, 114, 116, 108, 101]
```
or probably something more like this since it's a CSV file:
```
pokemon,type,int
bulbasaur,grass,"[98, 117, 108, 98, 97, 115, 97, 117, 114]"
squirtle,water,"[115, 113, 117, 105, 114, 116, 108, 101]"
```
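For comparison, a JSON Lines round-trip keeps the list structure intact, so I assume the problem is specific to the CSV serialization (a sketch using the same toy data):
```python
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"pokemon": ["bulbasaur"], "int": [[98, 117, 108]]})
ds.to_json("temp.jsonl")  # writes one JSON object per line by default

ds2 = load_dataset("json", data_files="temp.jsonl", split="train")
print(ds2[0]["int"])  # [98, 117, 108]
```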
### Environment info
### Package Version
Name: datasets
Version: 2.16.1
### Python
version: 3.10.12
### OS Info
PRETTY_NAME="Ubuntu 22.04.4 LTS"
NAME="Ubuntu"
VERSION_ID="22.04"
VERSION="22.04.4 LTS (Jammy Jellyfish)"
VERSION_CODENAME=jammy
ID=ubuntu
ID_LIKE=debian
...
UBUNTU_CODENAME=jammy | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6778/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6778/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6777 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6777/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6777/comments | https://api.github.com/repos/huggingface/datasets/issues/6777/events | https://github.com/huggingface/datasets/issues/6777 | 2,224,611,247 | I_kwDODunzps6EmN-v | 6,777 | .Jsonl metadata not detected | {
"avatar_url": "https://avatars.githubusercontent.com/u/81643693?v=4",
"events_url": "https://api.github.com/users/nighting0le01/events{/privacy}",
"followers_url": "https://api.github.com/users/nighting0le01/followers",
"following_url": "https://api.github.com/users/nighting0le01/following{/other_user}",
"gists_url": "https://api.github.com/users/nighting0le01/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/nighting0le01",
"id": 81643693,
"login": "nighting0le01",
"node_id": "MDQ6VXNlcjgxNjQzNjkz",
"organizations_url": "https://api.github.com/users/nighting0le01/orgs",
"received_events_url": "https://api.github.com/users/nighting0le01/received_events",
"repos_url": "https://api.github.com/users/nighting0le01/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/nighting0le01/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/nighting0le01/subscriptions",
"type": "User",
"url": "https://api.github.com/users/nighting0le01",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 5 | 2024-04-04T06:31:53Z | 2024-04-05T21:14:48Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Hi, I have the following directory structure:
|--dataset
| |-- images
| |-- metadata1000.csv
| |-- metadata1000.jsonl
| |-- padded_images
Example of the metadata1000.jsonl file:
{"caption": "a drawing depicts a full shot of a black t-shirt with a triangular pattern on the front there is a white label on the left side of the triangle", "image": "images/212734.png", "gaussian_padded_image": "padded_images/p_212734.png"}
{"caption": "an eye-level full shot of a large elephant and a baby elephant standing in a watering hole on the left side is a small elephant with its head turned to the right of dry land, trees, and bushes", "image": "images/212735.png", "gaussian_padded_image": "padded_images/p_212735.png"}
...
I'm trying to use `dataset = load_dataset("imagefolder", data_dir='/dataset/', split='train')` to load the dataset, however it is not able to load it according to the fields in metadata1000.jsonl.
Please assist with loading the data properly.
I am also getting the following error:
```
File "/workspace/train_trans_vae.py", line 1089, in <module>
print(get_metadata_patterns('/dataset/'))
File "/opt/conda/lib/python3.10/site-packages/datasets/data_files.py", line 499, in get_metadata_patterns
raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None
FileNotFoundError: The directory at /dataset/ doesn't contain any metadata file
```
when trying:
```
from datasets.data_files import get_metadata_patterns
print(get_metadata_patterns('/dataset/'))
```
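For reference, my current understanding (which may be wrong) is that the `imagefolder` builder only auto-detects a metadata file named exactly `metadata.jsonl` (or `metadata.csv`) that uses a `file_name` column, e.g.:
```
dataset/
    metadata.jsonl      # must be named exactly "metadata.jsonl"
    images/
    padded_images/
```
with lines such as:
```json
{"file_name": "images/212734.png", "caption": "a drawing depicts a full shot of a black t-shirt ..."}
```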
### Steps to reproduce the bug
`datasets` version: 2.18.0
Make a similar jsonl file and a similar directory format.
### Expected behavior
It creates a dataset object with the column names `caption`, `image`, and `gaussian_padded_image`.
### Environment info
dataset Version: 2.18.0 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6777/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6777/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6775 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6775/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6775/comments | https://api.github.com/repos/huggingface/datasets/issues/6775/events | https://github.com/huggingface/datasets/issues/6775 | 2,223,457,792 | I_kwDODunzps6Eh0YA | 6,775 | IndexError: Invalid key: 0 is out of bounds for size 0 | {
"avatar_url": "https://avatars.githubusercontent.com/u/38481564?v=4",
"events_url": "https://api.github.com/users/kk2491/events{/privacy}",
"followers_url": "https://api.github.com/users/kk2491/followers",
"following_url": "https://api.github.com/users/kk2491/following{/other_user}",
"gists_url": "https://api.github.com/users/kk2491/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/kk2491",
"id": 38481564,
"login": "kk2491",
"node_id": "MDQ6VXNlcjM4NDgxNTY0",
"organizations_url": "https://api.github.com/users/kk2491/orgs",
"received_events_url": "https://api.github.com/users/kk2491/received_events",
"repos_url": "https://api.github.com/users/kk2491/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/kk2491/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/kk2491/subscriptions",
"type": "User",
"url": "https://api.github.com/users/kk2491",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 7 | 2024-04-03T17:06:30Z | 2024-04-08T01:24:35Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
I am trying to fine-tune the llama2-7b model on GCP. The notebook I am using for this can be found [here](https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/model_garden/model_garden_pytorch_llama2_peft_finetuning.ipynb).
When I use the dataset given in the example, the training completes successfully (the example dataset can be found [here](https://huggingface.co/datasets/timdettmers/openassistant-guanaco)).
However, when I use my own dataset, which is in the same format as the example dataset, I get the error below (my dataset can be found [here](https://huggingface.co/datasets/kk2491/finetune_dataset_002)).

I see the files are being read correctly from the logs:

### Steps to reproduce the bug
1. Clone the [vertex-ai-samples](https://github.com/GoogleCloudPlatform/vertex-ai-samples) repository.
2. Run the [llama2-7b peft fine-tuning](https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/model_garden/model_garden_pytorch_llama2_peft_finetuning.ipynb).
3. Change the dataset to `kk2491/finetune_dataset_002`
### Expected behavior
The training should complete successfully, and model gets deployed to an endpoint.
### Environment info
Python version : Python 3.10.12
Dataset : https://huggingface.co/datasets/kk2491/finetune_dataset_002
| null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6775/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6775/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6774 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6774/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6774/comments | https://api.github.com/repos/huggingface/datasets/issues/6774/events | https://github.com/huggingface/datasets/issues/6774 | 2,222,164,316 | I_kwDODunzps6Ec4lc | 6,774 | Generating split is very slow when Image format is PNG | {
"avatar_url": "https://avatars.githubusercontent.com/u/22740819?v=4",
"events_url": "https://api.github.com/users/Tramac/events{/privacy}",
"followers_url": "https://api.github.com/users/Tramac/followers",
"following_url": "https://api.github.com/users/Tramac/following{/other_user}",
"gists_url": "https://api.github.com/users/Tramac/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Tramac",
"id": 22740819,
"login": "Tramac",
"node_id": "MDQ6VXNlcjIyNzQwODE5",
"organizations_url": "https://api.github.com/users/Tramac/orgs",
"received_events_url": "https://api.github.com/users/Tramac/received_events",
"repos_url": "https://api.github.com/users/Tramac/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Tramac/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Tramac/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Tramac",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 1 | 2024-04-03T07:47:31Z | 2024-04-10T17:28:17Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
When I create a dataset, it gets stuck while generating cached data.
The image format is PNG; it does not get stuck when the image format is JPEG.

After debugging, I know that it is because of the `pa.array` operation in [arrow_writer](https://github.com/huggingface/datasets/blob/2.13.0/src/datasets/arrow_writer.py#L553), but I don't know why.
### Steps to reproduce the bug
```
import json

from PIL import Image
from datasets import Dataset

def generator(lines):
    for line in lines:
        line = json.loads(line)  # assumption: each line of the file is a JSON record with a "url" field
        img = Image.open(open(line["url"], "rb"))
        # print(img.format)  # "PNG"
        yield {
            "image": img,
        }

lines = open(dataset_path, "r")  # dataset_path: path to the file listing the images
dataset = Dataset.from_generator(
    generator,
    gen_kwargs={"lines": lines}
)
```
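A hedged workaround sketch (this assumes the slowdown comes from re-encoding decoded PIL images back to PNG bytes, which has not been verified here): yield the file path instead of a decoded image and declare an `Image()` feature, so the writer stores the path without re-encoding:
```py
import json
from datasets import Dataset, Features, Image

def path_generator(lines):
    for line in lines:
        line = json.loads(line)       # same assumption about the input file as above
        yield {"image": line["url"]}  # a path string; no PIL decode/encode here

dataset = Dataset.from_generator(
    path_generator,
    gen_kwargs={"lines": open(dataset_path, "r")},  # dataset_path as in the snippet above
    features=Features({"image": Image()}),
)
```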
### Expected behavior
Generating the split completes without hanging.
### Environment info
datasets 2.13.0 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6774/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6774/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6773 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6773/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6773/comments | https://api.github.com/repos/huggingface/datasets/issues/6773/events | https://github.com/huggingface/datasets/issues/6773 | 2,221,049,121 | I_kwDODunzps6EYoUh | 6,773 | Dataset on Hub re-downloads every time? | {
"avatar_url": "https://avatars.githubusercontent.com/u/9099139?v=4",
"events_url": "https://api.github.com/users/manestay/events{/privacy}",
"followers_url": "https://api.github.com/users/manestay/followers",
"following_url": "https://api.github.com/users/manestay/following{/other_user}",
"gists_url": "https://api.github.com/users/manestay/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/manestay",
"id": 9099139,
"login": "manestay",
"node_id": "MDQ6VXNlcjkwOTkxMzk=",
"organizations_url": "https://api.github.com/users/manestay/orgs",
"received_events_url": "https://api.github.com/users/manestay/received_events",
"repos_url": "https://api.github.com/users/manestay/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/manestay/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/manestay/subscriptions",
"type": "User",
"url": "https://api.github.com/users/manestay",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 5 | 2024-04-02T17:23:22Z | 2024-04-08T18:43:45Z | 2024-04-08T18:43:45Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Hi, I have a dataset on the hub [here](https://huggingface.co/datasets/manestay/borderlines). It has 1k+ downloads, which I am sure is mostly just me and my colleagues working with it. It should have far fewer, since I'm using the same machine with a properly set up HF_HOME variable. However, whenever I run the below function `load_borderlines_hf`, it downloads the entire dataset from the hub and then does the other logic:
https://github.com/manestay/borderlines/blob/4e161f444661e2ebfe643f3fe149d9258d63a57d/run_gpt/lib.py#L80
Let me know what I'm doing wrong here, or if it's a bug with the `datasets` library itself. On the hub I have my data stored in CSVs, but several columns are lists, so that's why I have the code to map splitting on `;`. I looked into dataset loading scripts, but it seemed difficult to set up. I have verified that other `datasets` and `models` on my system are using the cache properly (e.g. I have a 13B parameter model and large datasets, but those are cached and don't redownload).
__EDIT:__ as pointed out in the discussion below, it may be the `map()` calls that aren't being cached properly. Supposing `load_dataset()` retrieves from the cache, the `map()` calls should also retrieve from their cached output, but the `map()` commands sometimes re-execute.
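For completeness, here is the kind of call pattern I would expect to hit the cache. This is a sketch only: the `langs` column and the `split(";")` logic are placeholders standing in for the real mapping code.
```py
from datasets import load_dataset

ds = load_dataset(
    "manestay/borderlines",
    "territories",
    download_mode="reuse_dataset_if_exists",  # the default: reuse the cached copy if present
)
ds = ds.map(
    lambda ex: {"langs": ex["langs"].split(";")},
    load_from_cache_file=True,  # reuse the cached result of an identical map call
)
```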
### Steps to reproduce the bug
1. Copy and paste the function from [here](https://github.com/manestay/borderlines/blob/4e161f444661e2ebfe643f3fe149d9258d63a57d/run_gpt/lib.py#L80) (lines 80-100)
2. Run it in Python `load_borderlines_hf(None)`
3. It completes successfully, downloading from HF hub, then doing the mapping logic etc.
4. If you run it again after some time, it will re-download, ignoring the cache
### Expected behavior
Re-running the code, which calls `datasets.load_dataset('manestay/borderlines', 'territories')`, should use the cached version
### Environment info
- `datasets` version: 2.16.1
- Platform: Linux-5.14.21-150500.55.7-default-x86_64-with-glibc2.31
- Python version: 3.10.13
- `huggingface_hub` version: 0.20.3
- PyArrow version: 15.0.0
- Pandas version: 1.5.3
- `fsspec` version: 2023.10.0 | {
"avatar_url": "https://avatars.githubusercontent.com/u/9099139?v=4",
"events_url": "https://api.github.com/users/manestay/events{/privacy}",
"followers_url": "https://api.github.com/users/manestay/followers",
"following_url": "https://api.github.com/users/manestay/following{/other_user}",
"gists_url": "https://api.github.com/users/manestay/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/manestay",
"id": 9099139,
"login": "manestay",
"node_id": "MDQ6VXNlcjkwOTkxMzk=",
"organizations_url": "https://api.github.com/users/manestay/orgs",
"received_events_url": "https://api.github.com/users/manestay/received_events",
"repos_url": "https://api.github.com/users/manestay/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/manestay/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/manestay/subscriptions",
"type": "User",
"url": "https://api.github.com/users/manestay",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6773/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6773/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6772 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6772/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6772/comments | https://api.github.com/repos/huggingface/datasets/issues/6772/events | https://github.com/huggingface/datasets/pull/6772 | 2,220,851,533 | PR_kwDODunzps5rdKZ2 | 6,772 | `remove_columns`/`rename_columns` doc fixes | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-02T15:41:28Z | 2024-04-02T16:28:45Z | 2024-04-02T16:17:46Z | COLLABORATOR | null | null | null | Use more consistent wording in `remove_columns` to explain why it's faster than `map` and update `remove_columns`/`rename_columns` docstrings to fix in-place calls.
Reported in https://github.com/huggingface/datasets/issues/6700 | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6772/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6772/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6772.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6772",
"merged_at": "2024-04-02T16:17:46Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6772.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6772"
} |
https://api.github.com/repos/huggingface/datasets/issues/6771 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6771/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6771/comments | https://api.github.com/repos/huggingface/datasets/issues/6771/events | https://github.com/huggingface/datasets/issues/6771 | 2,220,131,457 | I_kwDODunzps6EVISB | 6,771 | Datasets FileNotFoundError when trying to generate examples. | {
"avatar_url": "https://avatars.githubusercontent.com/u/26197115?v=4",
"events_url": "https://api.github.com/users/RitchieP/events{/privacy}",
"followers_url": "https://api.github.com/users/RitchieP/followers",
"following_url": "https://api.github.com/users/RitchieP/following{/other_user}",
"gists_url": "https://api.github.com/users/RitchieP/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/RitchieP",
"id": 26197115,
"login": "RitchieP",
"node_id": "MDQ6VXNlcjI2MTk3MTE1",
"organizations_url": "https://api.github.com/users/RitchieP/orgs",
"received_events_url": "https://api.github.com/users/RitchieP/received_events",
"repos_url": "https://api.github.com/users/RitchieP/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/RitchieP/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/RitchieP/subscriptions",
"type": "User",
"url": "https://api.github.com/users/RitchieP",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-04-02T10:24:57Z | 2024-04-04T14:22:03Z | 2024-04-04T14:22:03Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Discussed in https://github.com/huggingface/datasets/discussions/6768
*Originally posted by **RitchieP**, April 1, 2024*
Currently, I have a dataset hosted on Huggingface with a custom script [here](https://huggingface.co/datasets/RitchieP/VerbaLex_voice).
I'm loading my dataset as below.
```py
from datasets import load_dataset, IterableDatasetDict
dataset = IterableDatasetDict()
dataset["train"] = load_dataset("RitchieP/VerbaLex_voice", "ar", split="train", use_auth_token=True, streaming=True)
dataset["test"] = load_dataset("RitchieP/VerbaLex_voice", "ar", split="test", use_auth_token=True, streaming=True)
```
And when I try to see the data I have loaded with
```py
list(dataset["train"].take(1))
```
And it gives me this stack trace
```
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
Cell In[2], line 1
----> 1 list(dataset["train"].take(1))
File /opt/conda/lib/python3.10/site-packages/datasets/iterable_dataset.py:1388, in IterableDataset.__iter__(self)
1385 yield formatter.format_row(pa_table)
1386 return
-> 1388 for key, example in ex_iterable:
1389 if self.features:
1390 # `IterableDataset` automatically fills missing columns with None.
1391 # This is done with `_apply_feature_types_on_example`.
1392 example = _apply_feature_types_on_example(
1393 example, self.features, token_per_repo_id=self._token_per_repo_id
1394 )
File /opt/conda/lib/python3.10/site-packages/datasets/iterable_dataset.py:1044, in TakeExamplesIterable.__iter__(self)
1043 def __iter__(self):
-> 1044 yield from islice(self.ex_iterable, self.n)
File /opt/conda/lib/python3.10/site-packages/datasets/iterable_dataset.py:234, in ExamplesIterable.__iter__(self)
233 def __iter__(self):
--> 234 yield from self.generate_examples_fn(**self.kwargs)
File ~/.cache/huggingface/modules/datasets_modules/datasets/RitchieP--VerbaLex_voice/9465eaee58383cf9d7c3e14111d7abaea56398185a641b646897d6df4e4732f7/VerbaLex_voice.py:127, in VerbaLexVoiceDataset._generate_examples(self, local_extracted_archive_paths, archives, meta_path)
125 for i, audio_archive in enumerate(archives):
126 print(audio_archive)
--> 127 for path, file in audio_archive:
128 _, filename = os.path.split(path)
129 if filename in metadata:
File /opt/conda/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:869, in _IterableFromGenerator.__iter__(self)
868 def __iter__(self):
--> 869 yield from self.generator(*self.args, **self.kwargs)
File /opt/conda/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:919, in ArchiveIterable._iter_from_urlpath(cls, urlpath, download_config)
915 @classmethod
916 def _iter_from_urlpath(
917 cls, urlpath: str, download_config: Optional[DownloadConfig] = None
918 ) -> Generator[Tuple, None, None]:
--> 919 compression = _get_extraction_protocol(urlpath, download_config=download_config)
920 # Set block_size=0 to get faster streaming
921 # (e.g. for hf:// and https:// it uses streaming Requests file-like instances)
922 with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f:
File /opt/conda/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:400, in _get_extraction_protocol(urlpath, download_config)
398 urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
399 try:
--> 400 with fsspec.open(urlpath, **(storage_options or {})) as f:
401 return _get_extraction_protocol_with_magic_number(f)
402 except FileNotFoundError:
File /opt/conda/lib/python3.10/site-packages/fsspec/core.py:100, in OpenFile.__enter__(self)
97 def __enter__(self):
98 mode = self.mode.replace("t", "").replace("b", "") + "b"
--> 100 f = self.fs.open(self.path, mode=mode)
102 self.fobjects = [f]
104 if self.compression is not None:
File /opt/conda/lib/python3.10/site-packages/fsspec/spec.py:1307, in AbstractFileSystem.open(self, path, mode, block_size, cache_options, compression, **kwargs)
1305 else:
1306 ac = kwargs.pop("autocommit", not self._intrans)
-> 1307 f = self._open(
1308 path,
1309 mode=mode,
1310 block_size=block_size,
1311 autocommit=ac,
1312 cache_options=cache_options,
1313 **kwargs,
1314 )
1315 if compression is not None:
1316 from fsspec.compression import compr
File /opt/conda/lib/python3.10/site-packages/fsspec/implementations/local.py:180, in LocalFileSystem._open(self, path, mode, block_size, **kwargs)
178 if self.auto_mkdir and "w" in mode:
179 self.makedirs(self._parent(path), exist_ok=True)
--> 180 return LocalFileOpener(path, mode, fs=self, **kwargs)
File /opt/conda/lib/python3.10/site-packages/fsspec/implementations/local.py:302, in LocalFileOpener.__init__(self, path, mode, autocommit, fs, compression, **kwargs)
300 self.compression = get_compression(path, compression)
301 self.blocksize = io.DEFAULT_BUFFER_SIZE
--> 302 self._open()
File /opt/conda/lib/python3.10/site-packages/fsspec/implementations/local.py:307, in LocalFileOpener._open(self)
305 if self.f is None or self.f.closed:
306 if self.autocommit or "w" not in self.mode:
--> 307 self.f = open(self.path, mode=self.mode)
308 if self.compression:
309 compress = compr[self.compression]
FileNotFoundError: [Errno 2] No such file or directory: '/kaggle/working/h'
```
After looking into the stack trace, and referring to the source codes, it looks like its trying to access a directory in the notebook's environment and I don't understand why.
Not sure if its a bug in Datasets library, so I'm opening a discussions first. Feel free to ask for more information if needed. Appreciate any help in advance!</div>
Hi, referring to the discussion title above, after further digging, I think it's an issue within the datasets library. But not quite sure where it is.
If you require any more info or actions from me, please let me know. Appreciate any help in advance! | {
"avatar_url": "https://avatars.githubusercontent.com/u/26197115?v=4",
"events_url": "https://api.github.com/users/RitchieP/events{/privacy}",
"followers_url": "https://api.github.com/users/RitchieP/followers",
"following_url": "https://api.github.com/users/RitchieP/following{/other_user}",
"gists_url": "https://api.github.com/users/RitchieP/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/RitchieP",
"id": 26197115,
"login": "RitchieP",
"node_id": "MDQ6VXNlcjI2MTk3MTE1",
"organizations_url": "https://api.github.com/users/RitchieP/orgs",
"received_events_url": "https://api.github.com/users/RitchieP/received_events",
"repos_url": "https://api.github.com/users/RitchieP/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/RitchieP/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/RitchieP/subscriptions",
"type": "User",
"url": "https://api.github.com/users/RitchieP",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6771/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6771/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6770 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6770/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6770/comments | https://api.github.com/repos/huggingface/datasets/issues/6770/events | https://github.com/huggingface/datasets/issues/6770 | 2,218,991,883 | I_kwDODunzps6EQyEL | 6,770 | [Bug Report] `datasets==2.18.0` is not compatible with `fsspec==2023.12.2` | {
"avatar_url": "https://avatars.githubusercontent.com/u/19348888?v=4",
"events_url": "https://api.github.com/users/fshp971/events{/privacy}",
"followers_url": "https://api.github.com/users/fshp971/followers",
"following_url": "https://api.github.com/users/fshp971/following{/other_user}",
"gists_url": "https://api.github.com/users/fshp971/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/fshp971",
"id": 19348888,
"login": "fshp971",
"node_id": "MDQ6VXNlcjE5MzQ4ODg4",
"organizations_url": "https://api.github.com/users/fshp971/orgs",
"received_events_url": "https://api.github.com/users/fshp971/received_events",
"repos_url": "https://api.github.com/users/fshp971/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/fshp971/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/fshp971/subscriptions",
"type": "User",
"url": "https://api.github.com/users/fshp971",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 1 | 2024-04-01T20:17:48Z | 2024-04-11T17:31:44Z | 2024-04-11T17:31:44Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
`Datasets==2.18.0` is not compatible with `fsspec==2023.12.2`.
I have to downgrade fsspec to `fsspec==2023.10.0` to make `Datasets==2.18.0` work properly.
### Steps to reproduce the bug
To reproduce the bug:
1. Make sure that `datasets==2.18.0` and `fsspec==2023.12.2` are installed.
2. Run the following code:
```
from datasets import load_dataset
dataset = load_dataset("trec")
```
3. Then one will get the following error message:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/conda/lib/python3.10/site-packages/datasets/load.py", line 2556, in load_dataset
builder_instance = load_dataset_builder(
File "/opt/conda/lib/python3.10/site-packages/datasets/load.py", line 2265, in load_dataset_builder
builder_instance: DatasetBuilder = builder_cls(
File "/opt/conda/lib/python3.10/site-packages/datasets/builder.py", line 371, in __init__
self.config, self.config_id = self._create_builder_config(
File "/opt/conda/lib/python3.10/site-packages/datasets/builder.py", line 620, in _create_builder_config
builder_config._resolve_data_files(
File "/opt/conda/lib/python3.10/site-packages/datasets/builder.py", line 211, in _resolve_data_files
self.data_files = self.data_files.resolve(base_path, download_config)
File "/opt/conda/lib/python3.10/site-packages/datasets/data_files.py", line 799, in resolve
out[key] = data_files_patterns_list.resolve(base_path, download_config)
File "/opt/conda/lib/python3.10/site-packages/datasets/data_files.py", line 752, in resolve
resolve_pattern(
File "/opt/conda/lib/python3.10/site-packages/datasets/data_files.py", line 393, in resolve_pattern
raise FileNotFoundError(error_msg)
FileNotFoundError: Unable to find 'hf://datasets/trec@65752bf53af25bc935a0dce92fb5b6c930728450/default/train/0000.parquet' with any supported extension ['.csv', '.tsv', '.json', '.jsonl', '.parquet', '.geoparquet', '.gpq', '.arrow', '.txt', '.tar', '.blp', '.bmp', '.dib', '.bufr', '.cur', '.pcx', '.dcx', '.dds', '.ps', '.eps', '.fit', '.fits', '.fli', '.flc', '.ftc', '.ftu', '.gbr', '.gif', '.grib', '.h5', '.hdf', '.png', '.apng', '.jp2', '.j2k', '.jpc', '.jpf', '.jpx', '.j2c', '.icns', '.ico', '.im', '.iim', '.tif', '.tiff', '.jfif', '.jpe', '.jpg', '.jpeg', '.mpg', '.mpeg', '.msp', '.pcd', '.pxr', '.pbm', '.pgm', '.ppm', '.pnm', '.psd', '.bw', '.rgb', '.rgba', '.sgi', '.ras', '.tga', '.icb', '.vda', '.vst', '.webp', '.wmf', '.emf', '.xbm', '.xpm', '.BLP', '.BMP', '.DIB', '.BUFR', '.CUR', '.PCX', '.DCX', '.DDS', '.PS', '.EPS', '.FIT', '.FITS', '.FLI', '.FLC', '.FTC', '.FTU', '.GBR', '.GIF', '.GRIB', '.H5', '.HDF', '.PNG', '.APNG', '.JP2', '.J2K', '.JPC', '.JPF', '.JPX', '.J2C', '.ICNS', '.ICO', '.IM', '.IIM', '.TIF', '.TIFF', '.JFIF', '.JPE', '.JPG', '.JPEG', '.MPG', '.MPEG', '.MSP', '.PCD', '.PXR', '.PBM', '.PGM', '.PPM', '.PNM', '.PSD', '.BW', '.RGB', '.RGBA', '.SGI', '.RAS', '.TGA', '.ICB', '.VDA', '.VST', '.WEBP', '.WMF', '.EMF', '.XBM', '.XPM', '.aiff', '.au', '.avr', '.caf', '.flac', '.htk', '.svx', '.mat4', '.mat5', '.mpc2k', '.ogg', '.paf', '.pvf', '.raw', '.rf64', '.sd2', '.sds', '.ircam', '.voc', '.w64', '.wav', '.nist', '.wavex', '.wve', '.xi', '.mp3', '.opus', '.AIFF', '.AU', '.AVR', '.CAF', '.FLAC', '.HTK', '.SVX', '.MAT4', '.MAT5', '.MPC2K', '.OGG', '.PAF', '.PVF', '.RAW', '.RF64', '.SD2', '.SDS', '.IRCAM', '.VOC', '.W64', '.WAV', '.NIST', '.WAVEX', '.WVE', '.XI', '.MP3', '.OPUS', '.zip']
```
4. A similar issue is also found with the following code:
```
dataset = load_dataset("sst", "default")
```
### Expected behavior
If the dataset is loaded correctly, one will have:
```
>>> print(dataset)
DatasetDict({
train: Dataset({
features: ['text', 'coarse_label', 'fine_label'],
num_rows: 5452
})
test: Dataset({
features: ['text', 'coarse_label', 'fine_label'],
num_rows: 500
})
})
>>>
```
### Environment info
- `datasets` version: 2.18.0
- Platform: Linux-6.2.0-35-generic-x86_64-with-glibc2.31
- Python version: 3.10.13
- `huggingface_hub` version: 0.20.3
- PyArrow version: 15.0.1
- Pandas version: 2.2.1
- `fsspec` version: 2023.12.2 | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 1,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6770/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6770/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6769 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6769/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6769/comments | https://api.github.com/repos/huggingface/datasets/issues/6769/events | https://github.com/huggingface/datasets/issues/6769 | 2,218,242,015 | I_kwDODunzps6EN6_f | 6,769 | (Willing to PR) Datasets with custom python objects | {
"avatar_url": "https://avatars.githubusercontent.com/u/5236035?v=4",
"events_url": "https://api.github.com/users/fzyzcjy/events{/privacy}",
"followers_url": "https://api.github.com/users/fzyzcjy/followers",
"following_url": "https://api.github.com/users/fzyzcjy/following{/other_user}",
"gists_url": "https://api.github.com/users/fzyzcjy/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/fzyzcjy",
"id": 5236035,
"login": "fzyzcjy",
"node_id": "MDQ6VXNlcjUyMzYwMzU=",
"organizations_url": "https://api.github.com/users/fzyzcjy/orgs",
"received_events_url": "https://api.github.com/users/fzyzcjy/received_events",
"repos_url": "https://api.github.com/users/fzyzcjy/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/fzyzcjy/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/fzyzcjy/subscriptions",
"type": "User",
"url": "https://api.github.com/users/fzyzcjy",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | open | false | null | [] | null | 0 | 2024-04-01T13:18:47Z | 2024-04-01T13:36:58Z | null | CONTRIBUTOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Feature request
Hi, thanks for the library! I would like to have a Hugging Face Dataset where one of its columns contains custom (non-serializable) Python objects. For example, a minimal snippet:
```
import datasets

class MyClass:
    pass

dataset = datasets.Dataset.from_list([
    dict(a=MyClass(), b='hello'),
])
```
It gives error:
```
ArrowInvalid: Could not convert <__main__.MyClass object at 0x7a852830d050> with type MyClass: did not recognize Python value type when inferring an Arrow data type
```
I guess it is because Dataset forces everything to be converted into Arrow format. However, is there any way to make this scenario work? Thanks!
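In the meantime, a hedged workaround sketch (not a built-in feature of the library): store the object as pickled bytes, which Arrow can hold as a binary column, and unpickle it on access:
```py
import pickle
import datasets

class MyClass:
    pass

dataset = datasets.Dataset.from_list([
    dict(a=pickle.dumps(MyClass()), b="hello"),
])

obj = pickle.loads(dataset[0]["a"])
print(type(obj))  # <class '__main__.MyClass'>
```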
### Motivation
(see above)
### Your contribution
Yes, I am happy to PR!
Cross-posted: https://discuss.huggingface.co/t/datasets-with-custom-python-objects/79050?u=fzyzcjy
EDIT: possibly related https://github.com/huggingface/datasets/issues/5766 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 1,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6769/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6769/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6767 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6767/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6767/comments | https://api.github.com/repos/huggingface/datasets/issues/6767/events | https://github.com/huggingface/datasets/pull/6767 | 2,217,065,412 | PR_kwDODunzps5rQO9J | 6,767 | fixing the issue 6755(small typo) | {
"avatar_url": "https://avatars.githubusercontent.com/u/63234112?v=4",
"events_url": "https://api.github.com/users/JINO-ROHIT/events{/privacy}",
"followers_url": "https://api.github.com/users/JINO-ROHIT/followers",
"following_url": "https://api.github.com/users/JINO-ROHIT/following{/other_user}",
"gists_url": "https://api.github.com/users/JINO-ROHIT/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/JINO-ROHIT",
"id": 63234112,
"login": "JINO-ROHIT",
"node_id": "MDQ6VXNlcjYzMjM0MTEy",
"organizations_url": "https://api.github.com/users/JINO-ROHIT/orgs",
"received_events_url": "https://api.github.com/users/JINO-ROHIT/received_events",
"repos_url": "https://api.github.com/users/JINO-ROHIT/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/JINO-ROHIT/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JINO-ROHIT/subscriptions",
"type": "User",
"url": "https://api.github.com/users/JINO-ROHIT",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-03-31T16:13:37Z | 2024-04-02T14:14:02Z | 2024-04-02T14:01:18Z | CONTRIBUTOR | null | null | null | Fixes the typo reported in issue #6755. | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6767/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6767/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6767.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6767",
"merged_at": "2024-04-02T14:01:18Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6767.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6767"
} |
https://api.github.com/repos/huggingface/datasets/issues/6765 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6765/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6765/comments | https://api.github.com/repos/huggingface/datasets/issues/6765/events | https://github.com/huggingface/datasets/issues/6765 | 2,215,933,515 | I_kwDODunzps6EFHZL | 6,765 | Compatibility issue between s3fs, fsspec, and datasets | {
"avatar_url": "https://avatars.githubusercontent.com/u/33383515?v=4",
"events_url": "https://api.github.com/users/njbrake/events{/privacy}",
"followers_url": "https://api.github.com/users/njbrake/followers",
"following_url": "https://api.github.com/users/njbrake/following{/other_user}",
"gists_url": "https://api.github.com/users/njbrake/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/njbrake",
"id": 33383515,
"login": "njbrake",
"node_id": "MDQ6VXNlcjMzMzgzNTE1",
"organizations_url": "https://api.github.com/users/njbrake/orgs",
"received_events_url": "https://api.github.com/users/njbrake/received_events",
"repos_url": "https://api.github.com/users/njbrake/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/njbrake/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/njbrake/subscriptions",
"type": "User",
"url": "https://api.github.com/users/njbrake",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 4 | 2024-03-29T19:57:24Z | 2024-11-12T14:50:48Z | 2024-04-03T14:33:12Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Here is the full error stack when installing:
```
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
datasets 2.18.0 requires fsspec[http]<=2024.2.0,>=2023.1.0, but you have fsspec 2024.3.1 which is incompatible.
Successfully installed aiobotocore-2.12.1 aioitertools-0.11.0 botocore-1.34.51 fsspec-2024.3.1 jmespath-1.0.1 s3fs-2024.3.1 urllib3-2.0.7 wrapt-1.16.0
```
When I install with pip, pip allows this error to exist while still installing s3fs, but this error breaks poetry, since poetry will refuse to install s3fs because of the dependency conflict.
Maybe I'm missing something and it's not a bug but a mistake on my end? Any input would be helpful. Thanks!
### Steps to reproduce the bug
1. conda create -n tmp python=3.10 -y
2. conda activate tmp
3. pip install datasets
4. pip install s3fs
### Expected behavior
I would expect there to be no error.
### Environment info
macOS (ARM), Python 3.10, conda 23.11.0. | {
"avatar_url": "https://avatars.githubusercontent.com/u/33383515?v=4",
"events_url": "https://api.github.com/users/njbrake/events{/privacy}",
"followers_url": "https://api.github.com/users/njbrake/followers",
"following_url": "https://api.github.com/users/njbrake/following{/other_user}",
"gists_url": "https://api.github.com/users/njbrake/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/njbrake",
"id": 33383515,
"login": "njbrake",
"node_id": "MDQ6VXNlcjMzMzgzNTE1",
"organizations_url": "https://api.github.com/users/njbrake/orgs",
"received_events_url": "https://api.github.com/users/njbrake/received_events",
"repos_url": "https://api.github.com/users/njbrake/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/njbrake/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/njbrake/subscriptions",
"type": "User",
"url": "https://api.github.com/users/njbrake",
"user_view_type": "public"
} | {
"+1": 1,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6765/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6765/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6764 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6764/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6764/comments | https://api.github.com/repos/huggingface/datasets/issues/6764/events | https://github.com/huggingface/datasets/issues/6764 | 2,215,767,119 | I_kwDODunzps6EEexP | 6,764 | load_dataset can't work with symbolic links | {
"avatar_url": "https://avatars.githubusercontent.com/u/13640533?v=4",
"events_url": "https://api.github.com/users/VladimirVincan/events{/privacy}",
"followers_url": "https://api.github.com/users/VladimirVincan/followers",
"following_url": "https://api.github.com/users/VladimirVincan/following{/other_user}",
"gists_url": "https://api.github.com/users/VladimirVincan/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/VladimirVincan",
"id": 13640533,
"login": "VladimirVincan",
"node_id": "MDQ6VXNlcjEzNjQwNTMz",
"organizations_url": "https://api.github.com/users/VladimirVincan/orgs",
"received_events_url": "https://api.github.com/users/VladimirVincan/received_events",
"repos_url": "https://api.github.com/users/VladimirVincan/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/VladimirVincan/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/VladimirVincan/subscriptions",
"type": "User",
"url": "https://api.github.com/users/VladimirVincan",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | open | false | null | [] | null | 1 | 2024-03-29T17:49:28Z | 2025-04-29T15:06:28Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Feature request
Enable the `load_dataset` function to load local datasets with symbolic links.
E.g., this dataset can be loaded:
├── example_dataset/
│   ├── data/
│   │   ├── train/
│   │   │   ├── file0
│   │   │   ├── file1
│   │   ├── dev/
│   │   │   ├── file2
│   │   │   ├── file3
│   ├── metadata.csv
while this dataset can't:
├── example_dataset_symlink/
│   ├── data/
│   │   ├── train/
│   │   │   ├── sym0 -> file0
│   │   │   ├── sym1 -> file1
│   │   ├── dev/
│   │   │   ├── sym2 -> file2
│   │   │   ├── sym3 -> file3
│   ├── metadata.csv
I have created an example dataset in order to reproduce the problem:
1. Unzip `example_dataset.zip`.
2. Run `no_symlink.sh`. Training should start without issues.
3. Run `symlink.sh`. You will see that all four examples end up in the train split, instead of two examples in train and two in dev. The script won't load the correct audio files.
[example_dataset.zip](https://github.com/huggingface/datasets/files/14807053/example_dataset.zip)
### Motivation
I have a very large dataset locally. Instead of initiating training on the entire dataset, I need to start training on smaller subsets of the data. Due to the purpose of the experiments I am running, I will need to create many smaller datasets with overlapping data. Instead of copying all the files for each subset, I would prefer creating symbolic links to the data (see the sketch below). This way, the disk usage would not significantly increase beyond the initial dataset size.
Advantages of this approach:
- It would leave a smaller footprint on the hard drive
- Creating smaller datasets would be much faster
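For illustration, a sketch of the intended workflow and of the one-line resolution mentioned in the contribution section below; all paths here are made up:
```py
import os

# Build a subset that only holds symbolic links into the full dataset.
src = "/data/full_dataset/data/train/file0"
dst = "/data/example_dataset_symlink/data/train/sym0"
os.makedirs(os.path.dirname(dst), exist_ok=True)
os.symlink(src, dst)

# The proposed change in the loader would resolve links before reading:
real_path = os.path.realpath(dst)
print(real_path)  # /data/full_dataset/data/train/file0
```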
### Your contribution
I would gladly contribute, if this is something useful to the community. It seems like a simple code change: something like `file_path = os.path.realpath(file_path)` should be added before loading the files. If anyone has insights on how to incorporate this functionality, I would greatly appreciate your knowledge and input. | null | {
"+1": 8,
"-1": 0,
"confused": 0,
"eyes": 3,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 11,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6764/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6764/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6763 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6763/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6763/comments | https://api.github.com/repos/huggingface/datasets/issues/6763/events | https://github.com/huggingface/datasets/pull/6763 | 2,213,440,804 | PR_kwDODunzps5rENat | 6,763 | Fix issue with case sensitivity when loading dataset from local cache | {
"avatar_url": "https://avatars.githubusercontent.com/u/58537872?v=4",
"events_url": "https://api.github.com/users/Sumsky21/events{/privacy}",
"followers_url": "https://api.github.com/users/Sumsky21/followers",
"following_url": "https://api.github.com/users/Sumsky21/following{/other_user}",
"gists_url": "https://api.github.com/users/Sumsky21/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Sumsky21",
"id": 58537872,
"login": "Sumsky21",
"node_id": "MDQ6VXNlcjU4NTM3ODcy",
"organizations_url": "https://api.github.com/users/Sumsky21/orgs",
"received_events_url": "https://api.github.com/users/Sumsky21/received_events",
"repos_url": "https://api.github.com/users/Sumsky21/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Sumsky21/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Sumsky21/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Sumsky21",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 1 | 2024-03-28T14:52:35Z | 2024-04-20T12:16:45Z | null | NONE | null | null | null | When a dataset with upper-cases in its name is first loaded using `load_dataset()`, the local cache directory is created with all lowercase letters.
However, upon subsequent loads, the current version attempts to locate the cache directory using the dataset's original name, which includes uppercase letters. This discrepancy can lead to confusion and, particularly in offline mode, results in errors.
### Reproduce
```bash
~$ python
Python 3.9.19 (main, Mar 21 2024, 17:11:28)
[GCC 11.2.0] :: Anaconda, Inc. on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from datasets import load_dataset
>>> dataset = load_dataset("locuslab/TOFU", "full")
>>> quit()
~$ export HF_DATASETS_OFFLINE=1
~$ python
Python 3.9.19 (main, Mar 21 2024, 17:11:28)
[GCC 11.2.0] :: Anaconda, Inc. on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from datasets import load_dataset
>>> dataset = load_dataset("locuslab/TOFU", "full")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "xxxxxx/anaconda3/envs/llm/lib/python3.9/site-packages/datasets/load.py", line 2556, in load_dataset
builder_instance = load_dataset_builder(
File "xxxxxx/anaconda3/envs/llm/lib/python3.9/site-packages/datasets/load.py", line 2228, in load_dataset_builder
dataset_module = dataset_module_factory(
File "xxxxxx/anaconda3/envs/llm/lib/python3.9/site-packages/datasets/load.py", line 1871, in dataset_module_factory
raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None
ConnectionError: Couldn't reach the Hugging Face Hub for dataset 'locuslab/TOFU': Offline mode is enabled.
>>>
```
I fix this issue by lowercasing the dataset name (`.lower()`) when generating `cache_dir`. | null | {
"+1": 1,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6763/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6763/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6763.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6763",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6763.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6763"
} |
https://api.github.com/repos/huggingface/datasets/issues/6762 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6762/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6762/comments | https://api.github.com/repos/huggingface/datasets/issues/6762/events | https://github.com/huggingface/datasets/pull/6762 | 2,213,275,468 | PR_kwDODunzps5rDpBe | 6,762 | Allow polars as valid output type | {
"avatar_url": "https://avatars.githubusercontent.com/u/11325244?v=4",
"events_url": "https://api.github.com/users/psmyth94/events{/privacy}",
"followers_url": "https://api.github.com/users/psmyth94/followers",
"following_url": "https://api.github.com/users/psmyth94/following{/other_user}",
"gists_url": "https://api.github.com/users/psmyth94/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/psmyth94",
"id": 11325244,
"login": "psmyth94",
"node_id": "MDQ6VXNlcjExMzI1MjQ0",
"organizations_url": "https://api.github.com/users/psmyth94/orgs",
"received_events_url": "https://api.github.com/users/psmyth94/received_events",
"repos_url": "https://api.github.com/users/psmyth94/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/psmyth94/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/psmyth94/subscriptions",
"type": "User",
"url": "https://api.github.com/users/psmyth94",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-03-28T13:40:28Z | 2024-08-16T15:54:37Z | 2024-08-16T13:10:37Z | CONTRIBUTOR | null | null | null | I was trying out polars as an output for a map function and found that it wasn't a valid return type in `validate_function_output`. Thought that we should accommodate this by creating and adding it to the `allowed_processed_input_types` variable. | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6762/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6762/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6762.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6762",
"merged_at": "2024-08-16T13:10:37Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6762.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6762"
} |
https://api.github.com/repos/huggingface/datasets/issues/6761 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6761/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6761/comments | https://api.github.com/repos/huggingface/datasets/issues/6761/events | https://github.com/huggingface/datasets/pull/6761 | 2,212,805,108 | PR_kwDODunzps5rCAu8 | 6,761 | Remove deprecated code | {
"avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4",
"events_url": "https://api.github.com/users/Wauplin/events{/privacy}",
"followers_url": "https://api.github.com/users/Wauplin/followers",
"following_url": "https://api.github.com/users/Wauplin/following{/other_user}",
"gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Wauplin",
"id": 11801849,
"login": "Wauplin",
"node_id": "MDQ6VXNlcjExODAxODQ5",
"organizations_url": "https://api.github.com/users/Wauplin/orgs",
"received_events_url": "https://api.github.com/users/Wauplin/received_events",
"repos_url": "https://api.github.com/users/Wauplin/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Wauplin",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 5 | 2024-03-28T09:57:57Z | 2024-03-29T13:27:26Z | 2024-03-29T13:18:13Z | CONTRIBUTOR | null | null | null | What does this PR do?
1. Remove `list_files_info` in favor of `list_repo_tree`. As of `0.23`, `list_files_info` will be removed for good. `datasets` had a utility to support both pre-0.20 and post-0.20 versions. Since the `hfh` version is already pinned to `>=0.21.2`, I removed the legacy part.
2. `preupload_lfs_files` also had a different behavior between `<0.20` and `>=0.20`. I removed it since huggingface_hub is now pinned to `>=0.21.2`.
3. `hf_hub_url` is overwritten to default to the dataset repo_type. I do think it is misleading to keep the same method naming for it. I renamed it to `get_dataset_url` for clarity. Let me know if you prefer to see this change reverted. | {
"avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4",
"events_url": "https://api.github.com/users/Wauplin/events{/privacy}",
"followers_url": "https://api.github.com/users/Wauplin/followers",
"following_url": "https://api.github.com/users/Wauplin/following{/other_user}",
"gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Wauplin",
"id": 11801849,
"login": "Wauplin",
"node_id": "MDQ6VXNlcjExODAxODQ5",
"organizations_url": "https://api.github.com/users/Wauplin/orgs",
"received_events_url": "https://api.github.com/users/Wauplin/received_events",
"repos_url": "https://api.github.com/users/Wauplin/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Wauplin",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 1,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6761/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6761/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6761.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6761",
"merged_at": "2024-03-29T13:18:13Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6761.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6761"
} |
https://api.github.com/repos/huggingface/datasets/issues/6760 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6760/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6760/comments | https://api.github.com/repos/huggingface/datasets/issues/6760/events | https://github.com/huggingface/datasets/issues/6760 | 2,212,288,122 | I_kwDODunzps6D3NZ6 | 6,760 | Load codeparrot/apps raising UnicodeDecodeError in datasets-2.18.0 | {
"avatar_url": "https://avatars.githubusercontent.com/u/17897916?v=4",
"events_url": "https://api.github.com/users/yucc-leon/events{/privacy}",
"followers_url": "https://api.github.com/users/yucc-leon/followers",
"following_url": "https://api.github.com/users/yucc-leon/following{/other_user}",
"gists_url": "https://api.github.com/users/yucc-leon/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/yucc-leon",
"id": 17897916,
"login": "yucc-leon",
"node_id": "MDQ6VXNlcjE3ODk3OTE2",
"organizations_url": "https://api.github.com/users/yucc-leon/orgs",
"received_events_url": "https://api.github.com/users/yucc-leon/received_events",
"repos_url": "https://api.github.com/users/yucc-leon/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/yucc-leon/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/yucc-leon/subscriptions",
"type": "User",
"url": "https://api.github.com/users/yucc-leon",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 4 | 2024-03-28T03:44:26Z | 2024-06-19T07:06:40Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
This happens with datasets 2.18.0; downgrading to 2.14.6 fixes it temporarily.
```
Traceback (most recent call last):
File "/home/xxx/miniconda3/envs/py310/lib/python3.10/site-packages/datasets/load.py", line 2556, in load_dataset
builder_instance = load_dataset_builder(
File "/home/xxx/miniconda3/envs/py310/lib/python3.10/site-packages/datasets/load.py", line 2228, in load_dataset_builder
dataset_module = dataset_module_factory(
File "/home/xxx/miniconda3/envs/py310/lib/python3.10/site-packages/datasets/load.py", line 1879, in dataset_module_factory
raise e1 from None
File "/home/xxx/miniconda3/envs/py310/lib/python3.10/site-packages/datasets/load.py", line 1831, in dataset_module_factory
can_load_config_from_parquet_export = "DEFAULT_CONFIG_NAME" not in f.read()
File "/home/xxx/miniconda3/envs/py310/lib/python3.10/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte
```
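A hedged observation from my side (an assumption, not a verified diagnosis): byte `0x8b` at position 1 is the second byte of the gzip magic number `\x1f\x8b`, so the file being decoded as UTF-8 here looks like it is actually gzip-compressed. A quick check on the offending cached file (the path is a placeholder):
```python
# Check whether the cached file datasets is trying to read is gzip-compressed (placeholder path).
with open("/path/to/cached/file", "rb") as f:
    print(f.read(2) == b"\x1f\x8b")  # True would mean gzip, matching the 0x8b byte in the error
```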
### Steps to reproduce the bug
1. Using Python3.10/3.11
2. Install datasets-2.18.0
3. test with
```
from datasets import load_dataset
dataset = load_dataset("codeparrot/apps")
```
### Expected behavior
Normally it should manage to download and load the dataset without such an error.
### Environment info
Ubuntu, Python3.10/3.11 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6760/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6760/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6759 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6759/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6759/comments | https://api.github.com/repos/huggingface/datasets/issues/6759/events | https://github.com/huggingface/datasets/issues/6759 | 2,208,892,891 | I_kwDODunzps6DqQfb | 6,759 | Persistent multi-process Pool | {
"avatar_url": "https://avatars.githubusercontent.com/u/4337024?v=4",
"events_url": "https://api.github.com/users/fostiropoulos/events{/privacy}",
"followers_url": "https://api.github.com/users/fostiropoulos/followers",
"following_url": "https://api.github.com/users/fostiropoulos/following{/other_user}",
"gists_url": "https://api.github.com/users/fostiropoulos/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/fostiropoulos",
"id": 4337024,
"login": "fostiropoulos",
"node_id": "MDQ6VXNlcjQzMzcwMjQ=",
"organizations_url": "https://api.github.com/users/fostiropoulos/orgs",
"received_events_url": "https://api.github.com/users/fostiropoulos/received_events",
"repos_url": "https://api.github.com/users/fostiropoulos/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/fostiropoulos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/fostiropoulos/subscriptions",
"type": "User",
"url": "https://api.github.com/users/fostiropoulos",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | open | false | null | [] | null | 0 | 2024-03-26T17:35:25Z | 2024-03-26T17:35:25Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Feature request
Running `.map` and `.filter` with `num_proc` consecutively instantiates a new multiprocessing pool for every call.
Since instantiating a Pool is very resource-intensive, this can become a bottleneck when filtering iteratively.
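For context, a small runnable sketch of the pattern this request is about (the dataset and functions are made up purely for illustration): every chained call pays the pool start-up cost again.
```python
from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(100_000))})

# Each of these calls currently creates (and tears down) its own worker pool:
ds = ds.map(lambda batch: {"y": [v * 2 for v in batch["x"]]}, batched=True, num_proc=4)
ds = ds.filter(lambda example: example["y"] % 3 == 0, num_proc=4)
# A persistent pool (option 1 below) or a caller-provided pool (option 2 below)
# would avoid re-spawning workers on every call.
```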
My ideas:
1. There should be an option to declare `persistent_workers`, similar to the PyTorch DataLoader. The downside is that it would be complex to determine the correct resource allocation and deallocation of the pool, i.e. the dataset can outlive the utility of the pool.
2. Provide a pool as an argument. The downside is the expertise required by the user; the upside is better resource management.
### Motivation
It is really slow to iteratively perform map and filter operations on a dataset.
### Your contribution
If approved, I could integrate it. I would need to know which of the two options above would be most suitable to implement. | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6759/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6759/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6758 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6758/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6758/comments | https://api.github.com/repos/huggingface/datasets/issues/6758/events | https://github.com/huggingface/datasets/issues/6758 | 2,208,494,302 | I_kwDODunzps6DovLe | 6,758 | Passing `sample_by` to `load_dataset` when loading text data does not work | {
"avatar_url": "https://avatars.githubusercontent.com/u/823693?v=4",
"events_url": "https://api.github.com/users/ntoxeg/events{/privacy}",
"followers_url": "https://api.github.com/users/ntoxeg/followers",
"following_url": "https://api.github.com/users/ntoxeg/following{/other_user}",
"gists_url": "https://api.github.com/users/ntoxeg/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/ntoxeg",
"id": 823693,
"login": "ntoxeg",
"node_id": "MDQ6VXNlcjgyMzY5Mw==",
"organizations_url": "https://api.github.com/users/ntoxeg/orgs",
"received_events_url": "https://api.github.com/users/ntoxeg/received_events",
"repos_url": "https://api.github.com/users/ntoxeg/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/ntoxeg/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ntoxeg/subscriptions",
"type": "User",
"url": "https://api.github.com/users/ntoxeg",
"user_view_type": "public"
} | [] | closed | false | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [
{
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
}
] | null | 1 | 2024-03-26T14:55:33Z | 2024-04-09T11:27:59Z | 2024-04-09T11:27:59Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
I have a dataset that consists of a bunch of text files, each representing an example. There is an undocumented `sample_by` argument for the `TextConfig` class that is used by `Text` to decide whether to split files into lines, paragraphs or take them whole. Passing `sample_by="document"` to `load_dataset` results in files getting split into lines regardless. I have edited `src/datasets/packaged_modules/text/text.py` for myself to switch the default and it works fine.
As a side note, the `if-else` for `sample_by` will silently load an empty dataset if someone makes a typo in the argument, which is not ideal.
### Steps to reproduce the bug
1. Prepare data as a bunch of files in a directory.
2. Load that data via `load_dataset("text", data_files=<data_dir>/<files_glob>, ..., sample_by="document")`.
3. Inspect the resultant dataset: every item should have the form of `{"text": <a line from a file>}`.
### Expected behavior
`load_dataset("text", data_files=<data_dir>/<files_glob>, ..., sample_by="document")` should result in a dataset with items of the form `{"text": <one document>}`.
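A minimal sketch of the intended call, with a placeholder glob (this is only how I read the `sample_by` option; the behavior described above is what actually happens today):
```python
from datasets import load_dataset

# "document" should yield one record per file; "line" (the default) yields one record per line.
ds = load_dataset("text", data_files="my_data_dir/*.txt", sample_by="document")
print(ds["train"][0])  # expected: {"text": "<full contents of one file>"}
```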
### Environment info
- `datasets` version: 2.18.0
- Platform: Linux-5.15.0-1046-nvidia-x86_64-with-glibc2.35
- Python version: 3.11.8
- `huggingface_hub` version: 0.21.4
- PyArrow version: 15.0.2
- Pandas version: 2.2.1
- `fsspec` version: 2024.2.0 | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6758/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6758/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6757 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6757/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6757/comments | https://api.github.com/repos/huggingface/datasets/issues/6757/events | https://github.com/huggingface/datasets/pull/6757 | 2,206,280,340 | PR_kwDODunzps5qr7Li | 6,757 | Test disabling transformers containers in docs CI | {
"avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4",
"events_url": "https://api.github.com/users/Wauplin/events{/privacy}",
"followers_url": "https://api.github.com/users/Wauplin/followers",
"following_url": "https://api.github.com/users/Wauplin/following{/other_user}",
"gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Wauplin",
"id": 11801849,
"login": "Wauplin",
"node_id": "MDQ6VXNlcjExODAxODQ5",
"organizations_url": "https://api.github.com/users/Wauplin/orgs",
"received_events_url": "https://api.github.com/users/Wauplin/received_events",
"repos_url": "https://api.github.com/users/Wauplin/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Wauplin",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 3 | 2024-03-25T17:16:11Z | 2024-03-27T16:26:35Z | null | CONTRIBUTOR | null | null | null | Related to https://github.com/huggingface/doc-builder/pull/487 and [internal slack thread](https://huggingface.slack.com/archives/C04F8N7FQNL/p1711384899462349?thread_ts=1711041424.720769&cid=C04F8N7FQNL). There is now a `custom_container` option when building docs in CI. When set to `""` (instead of `"huggingface/transformers-doc-builder"` by default), we don't run the CI inside a container, therefore saving ~2min of download time. The plan is to test disabling the transformers container on a few "big" repo and if everything works correctly, we will stop making it the default container. More details on https://github.com/huggingface/doc-builder/pull/487.
cc @mishig25 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 1,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6757/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6757/timeline | null | null | true | {
"diff_url": "https://github.com/huggingface/datasets/pull/6757.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6757",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6757.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6757"
} |
https://api.github.com/repos/huggingface/datasets/issues/6756 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6756/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6756/comments | https://api.github.com/repos/huggingface/datasets/issues/6756/events | https://github.com/huggingface/datasets/issues/6756 | 2,205,557,725 | I_kwDODunzps6DdiPd | 6,756 | Support SQLite files? | {
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/severo",
"id": 1676121,
"login": "severo",
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"organizations_url": "https://api.github.com/users/severo/orgs",
"received_events_url": "https://api.github.com/users/severo/received_events",
"repos_url": "https://api.github.com/users/severo/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"type": "User",
"url": "https://api.github.com/users/severo",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | closed | false | null | [] | null | 3 | 2024-03-25T11:48:05Z | 2024-03-26T16:09:32Z | 2024-03-26T16:09:32Z | COLLABORATOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Feature request
Support loading a dataset from a SQLite file
https://huggingface.co/datasets/severo/test_iris_sqlite/tree/main
### Motivation
SQLite is a popular file format.
### Your contribution
See discussion on slack: https://huggingface.slack.com/archives/C04L6P8KNQ5/p1702481859117909 (internal)
In particular: a SQLite file can contain multiple tables, which might be matched to multiple configs. Maybe the detail of splits and configs should be defined in the README YAML, or use the same format as for ZIP files: `Iris.sqlite::Iris`.
See dataset here: https://huggingface.co/datasets/severo/test_iris_sqlite
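For reference, a sketch of what already seems possible today through the existing SQL reader, assuming the SQLite file has been downloaded locally (the path is a placeholder, and this does not address the Hub-side config/split mapping discussed above):
```python
from datasets import Dataset

# Read one table ("Iris") from a local SQLite file via the SQL reader.
ds = Dataset.from_sql("Iris", "sqlite:///path/to/Iris.sqlite")
print(ds)
```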
Note: should we also support DuckDB files? | {
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/severo",
"id": 1676121,
"login": "severo",
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"organizations_url": "https://api.github.com/users/severo/orgs",
"received_events_url": "https://api.github.com/users/severo/received_events",
"repos_url": "https://api.github.com/users/severo/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"type": "User",
"url": "https://api.github.com/users/severo",
"user_view_type": "public"
} | {
"+1": 1,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6756/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6756/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6755 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6755/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6755/comments | https://api.github.com/repos/huggingface/datasets/issues/6755/events | https://github.com/huggingface/datasets/issues/6755 | 2,204,573,289 | I_kwDODunzps6DZx5p | 6,755 | Small typo on the documentation | {
"avatar_url": "https://avatars.githubusercontent.com/u/4337024?v=4",
"events_url": "https://api.github.com/users/fostiropoulos/events{/privacy}",
"followers_url": "https://api.github.com/users/fostiropoulos/followers",
"following_url": "https://api.github.com/users/fostiropoulos/following{/other_user}",
"gists_url": "https://api.github.com/users/fostiropoulos/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/fostiropoulos",
"id": 4337024,
"login": "fostiropoulos",
"node_id": "MDQ6VXNlcjQzMzcwMjQ=",
"organizations_url": "https://api.github.com/users/fostiropoulos/orgs",
"received_events_url": "https://api.github.com/users/fostiropoulos/received_events",
"repos_url": "https://api.github.com/users/fostiropoulos/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/fostiropoulos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/fostiropoulos/subscriptions",
"type": "User",
"url": "https://api.github.com/users/fostiropoulos",
"user_view_type": "public"
} | [
{
"color": "7057ff",
"default": true,
"description": "Good for newcomers",
"id": 1935892877,
"name": "good first issue",
"node_id": "MDU6TGFiZWwxOTM1ODkyODc3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue"
}
] | closed | false | {
"avatar_url": "https://avatars.githubusercontent.com/u/63234112?v=4",
"events_url": "https://api.github.com/users/JINO-ROHIT/events{/privacy}",
"followers_url": "https://api.github.com/users/JINO-ROHIT/followers",
"following_url": "https://api.github.com/users/JINO-ROHIT/following{/other_user}",
"gists_url": "https://api.github.com/users/JINO-ROHIT/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/JINO-ROHIT",
"id": 63234112,
"login": "JINO-ROHIT",
"node_id": "MDQ6VXNlcjYzMjM0MTEy",
"organizations_url": "https://api.github.com/users/JINO-ROHIT/orgs",
"received_events_url": "https://api.github.com/users/JINO-ROHIT/received_events",
"repos_url": "https://api.github.com/users/JINO-ROHIT/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/JINO-ROHIT/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JINO-ROHIT/subscriptions",
"type": "User",
"url": "https://api.github.com/users/JINO-ROHIT",
"user_view_type": "public"
} | [
{
"avatar_url": "https://avatars.githubusercontent.com/u/63234112?v=4",
"events_url": "https://api.github.com/users/JINO-ROHIT/events{/privacy}",
"followers_url": "https://api.github.com/users/JINO-ROHIT/followers",
"following_url": "https://api.github.com/users/JINO-ROHIT/following{/other_user}",
"gists_url": "https://api.github.com/users/JINO-ROHIT/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/JINO-ROHIT",
"id": 63234112,
"login": "JINO-ROHIT",
"node_id": "MDQ6VXNlcjYzMjM0MTEy",
"organizations_url": "https://api.github.com/users/JINO-ROHIT/orgs",
"received_events_url": "https://api.github.com/users/JINO-ROHIT/received_events",
"repos_url": "https://api.github.com/users/JINO-ROHIT/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/JINO-ROHIT/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JINO-ROHIT/subscriptions",
"type": "User",
"url": "https://api.github.com/users/JINO-ROHIT",
"user_view_type": "public"
}
] | null | 3 | 2024-03-24T21:47:52Z | 2024-04-02T14:01:19Z | 2024-04-02T14:01:19Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
There is a small typo on https://github.com/huggingface/datasets/blob/d5468836fe94e8be1ae093397dd43d4a2503b926/src/datasets/dataset_dict.py#L938
It should be `caching is enabled`.
### Steps to reproduce the bug
Please visit
https://github.com/huggingface/datasets/blob/d5468836fe94e8be1ae093397dd43d4a2503b926/src/datasets/dataset_dict.py#L938
### Expected behavior
`caching is enabled`
### Environment info
- `datasets` version: 2.17.1
- Platform: Linux-5.15.0-101-generic-x86_64-with-glibc2.35
- Python version: 3.11.7
- `huggingface_hub` version: 0.20.3
- PyArrow version: 15.0.0
- Pandas version: 2.2.1
- `fsspec` version: 2023.10.0 | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6755/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6755/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6754 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6754/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6754/comments | https://api.github.com/repos/huggingface/datasets/issues/6754/events | https://github.com/huggingface/datasets/pull/6754 | 2,204,214,595 | PR_kwDODunzps5qk-nr | 6,754 | Fix cache path to snakecase for `CachedDatasetModuleFactory` and `Cache` | {
"avatar_url": "https://avatars.githubusercontent.com/u/26690193?v=4",
"events_url": "https://api.github.com/users/izhx/events{/privacy}",
"followers_url": "https://api.github.com/users/izhx/followers",
"following_url": "https://api.github.com/users/izhx/following{/other_user}",
"gists_url": "https://api.github.com/users/izhx/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/izhx",
"id": 26690193,
"login": "izhx",
"node_id": "MDQ6VXNlcjI2NjkwMTkz",
"organizations_url": "https://api.github.com/users/izhx/orgs",
"received_events_url": "https://api.github.com/users/izhx/received_events",
"repos_url": "https://api.github.com/users/izhx/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/izhx/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/izhx/subscriptions",
"type": "User",
"url": "https://api.github.com/users/izhx",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 6 | 2024-03-24T06:59:15Z | 2024-04-15T15:45:44Z | 2024-04-15T15:38:51Z | CONTRIBUTOR | null | null | null | Fix https://github.com/huggingface/datasets/issues/6750#issuecomment-2016678729
I didn't find a guideline on how to run the tests, so I just ran the following steps to make sure that this bug is fixed.
1. `python test.py`,
2. then `HF_DATASETS_OFFLINE=1 python test.py`
The `test.py` is
```
import datasets
datasets.utils.logging.set_verbosity_info()
ds = datasets.load_dataset('izhx/STS17-debug')
print(ds)
ds = datasets.load_dataset('C-MTEB/AFQMC', revision='b44c3b011063adb25877c13823db83bb193913c4')
print(ds)
```
| {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6754/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6754/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6754.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6754",
"merged_at": "2024-04-15T15:38:51Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6754.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6754"
} |
https://api.github.com/repos/huggingface/datasets/issues/6753 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6753/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6753/comments | https://api.github.com/repos/huggingface/datasets/issues/6753/events | https://github.com/huggingface/datasets/issues/6753 | 2,204,155,091 | I_kwDODunzps6DYLzT | 6,753 | Type error when importing datasets on Kaggle | {
"avatar_url": "https://avatars.githubusercontent.com/u/18300717?v=4",
"events_url": "https://api.github.com/users/jtv199/events{/privacy}",
"followers_url": "https://api.github.com/users/jtv199/followers",
"following_url": "https://api.github.com/users/jtv199/following{/other_user}",
"gists_url": "https://api.github.com/users/jtv199/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jtv199",
"id": 18300717,
"login": "jtv199",
"node_id": "MDQ6VXNlcjE4MzAwNzE3",
"organizations_url": "https://api.github.com/users/jtv199/orgs",
"received_events_url": "https://api.github.com/users/jtv199/received_events",
"repos_url": "https://api.github.com/users/jtv199/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jtv199/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jtv199/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jtv199",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 8 | 2024-03-24T03:01:30Z | 2024-10-02T11:49:35Z | 2024-03-30T00:23:49Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
When trying to run
```
import datasets
print(datasets.__version__)
```
It generates the following error
```
TypeError: expected string or bytes-like object
```
It looks like it cannot find a valid version of `fsspec`,
though the fsspec version is fine when I check via this command:
```
import fsspec
print(fsspec.__version__)
# output: 2024.3.1
```
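A hedged diagnostic sketch (my assumption is that the Kaggle image ships a broken or duplicate `fsspec` dist-info, so the metadata lookup used by `datasets/config.py` returns `None` instead of a version string):
```python
import importlib.metadata

# This is the same lookup datasets/config.py performs before calling version.parse().
print(importlib.metadata.version("fsspec"))  # None here would explain the TypeError below
```
If this prints `None`, force-reinstalling fsspec (`pip install -U --force-reinstall fsspec`) is a plausible workaround, though I have not verified it on Kaggle.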
Detailed crash report
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[1], line 1
----> 1 import datasets
2 print(datasets.__version__)
File /opt/conda/lib/python3.10/site-packages/datasets/__init__.py:18
1 # ruff: noqa
2 # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
3 #
(...)
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
16 __version__ = "2.18.0"
---> 18 from .arrow_dataset import Dataset
19 from .arrow_reader import ReadInstruction
20 from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:66
63 from multiprocess import Pool
64 from tqdm.contrib.concurrent import thread_map
---> 66 from . import config
67 from .arrow_reader import ArrowReader
68 from .arrow_writer import ArrowWriter, OptimizedTypedSequence
File /opt/conda/lib/python3.10/site-packages/datasets/config.py:41
39 # Imports
40 DILL_VERSION = version.parse(importlib.metadata.version("dill"))
---> 41 FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec"))
42 PANDAS_VERSION = version.parse(importlib.metadata.version("pandas"))
43 PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow"))
File /opt/conda/lib/python3.10/site-packages/packaging/version.py:49, in parse(version)
43 """
44 Parse the given version string and return either a :class:`Version` object
45 or a :class:`LegacyVersion` object depending on if the given version is
46 a valid PEP 440 version or a legacy version.
47 """
48 try:
---> 49 return Version(version)
50 except InvalidVersion:
51 return LegacyVersion(version)
File /opt/conda/lib/python3.10/site-packages/packaging/version.py:264, in Version.__init__(self, version)
261 def __init__(self, version: str) -> None:
262
263 # Validate the version and parse it into pieces
--> 264 match = self._regex.search(version)
265 if not match:
266 raise InvalidVersion(f"Invalid version: '{version}'")
TypeError: expected string or bytes-like object
```
### Steps to reproduce the bug
1. Run `!pip install -U datasets` on Kaggle
2. Check that datasets is installed via:
```
import datasets
print(datasets.__version__)
```
### Expected behavior
Expected to print datasets version, like `2.18.0`
### Environment info
Running on Kaggle, latest environment; here is the notebook: https://www.kaggle.com/code/jtv199/mistrial-7b-part2 | {
"avatar_url": "https://avatars.githubusercontent.com/u/18300717?v=4",
"events_url": "https://api.github.com/users/jtv199/events{/privacy}",
"followers_url": "https://api.github.com/users/jtv199/followers",
"following_url": "https://api.github.com/users/jtv199/following{/other_user}",
"gists_url": "https://api.github.com/users/jtv199/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jtv199",
"id": 18300717,
"login": "jtv199",
"node_id": "MDQ6VXNlcjE4MzAwNzE3",
"organizations_url": "https://api.github.com/users/jtv199/orgs",
"received_events_url": "https://api.github.com/users/jtv199/received_events",
"repos_url": "https://api.github.com/users/jtv199/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jtv199/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jtv199/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jtv199",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6753/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6753/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6752 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6752/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6752/comments | https://api.github.com/repos/huggingface/datasets/issues/6752/events | https://github.com/huggingface/datasets/issues/6752 | 2,204,043,839 | I_kwDODunzps6DXwo_ | 6,752 | Precision being changed from float16 to float32 unexpectedly | {
"avatar_url": "https://avatars.githubusercontent.com/u/21228908?v=4",
"events_url": "https://api.github.com/users/gcervantes8/events{/privacy}",
"followers_url": "https://api.github.com/users/gcervantes8/followers",
"following_url": "https://api.github.com/users/gcervantes8/following{/other_user}",
"gists_url": "https://api.github.com/users/gcervantes8/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/gcervantes8",
"id": 21228908,
"login": "gcervantes8",
"node_id": "MDQ6VXNlcjIxMjI4OTA4",
"organizations_url": "https://api.github.com/users/gcervantes8/orgs",
"received_events_url": "https://api.github.com/users/gcervantes8/received_events",
"repos_url": "https://api.github.com/users/gcervantes8/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/gcervantes8/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/gcervantes8/subscriptions",
"type": "User",
"url": "https://api.github.com/users/gcervantes8",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 1 | 2024-03-23T20:53:56Z | 2024-04-10T15:21:33Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
I'm loading a HuggingFace Dataset for images.
I'm running a preprocessing (map operation) step that runs a few operations, one of them being conversion to float16. The Dataset features also say that the 'img' is of type float16. Whenever I take an image from that HuggingFace Dataset instance, the type turns out to be float32.
### Steps to reproduce the bug
```python
import torch
import torchvision.transforms.v2 as transforms
from datasets import load_dataset
dataset = load_dataset('cifar10', split='test')
dataset = dataset.with_format("torch")
data_transform = transforms.Compose([transforms.Resize((32, 32)),
transforms.ToDtype(torch.float16, scale=True),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])
def _preprocess(examples):
# Permutes from (BS x H x W x C) to (BS x C x H x W)
images = torch.permute(examples['img'], (0, 3, 2, 1))
examples['img'] = data_transform(images)
return examples
dataset = dataset.map(_preprocess, batched=True, batch_size=8)
```
Now at this point the dataset.features are showing float16 which is great because that's what I want.
```python
print(data_loader.features['img'])
Sequence(feature=Sequence(feature=Sequence(feature=Value(dtype='float16', id=None), length=-1, id=None), length=-1, id=None), length=-1, id=None)
```
But when I try to sample an image from this dataloader, I get a float32 image when I'm expecting float16:
```python
print(next(iter(data_loader))['img'].dtype)
torch.float32
```
### Expected behavior
I'm expecting the images loaded after the transformation to stay in float16.
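A possible workaround to try (an assumption on my part, not a confirmed diagnosis of where the upcast happens): pass an explicit dtype to the torch formatter so tensors are cast on output.
```python
import torch

# Format kwargs such as dtype are forwarded to torch.tensor by the "torch" formatter.
dataset = dataset.with_format("torch", dtype=torch.float16)
print(next(iter(dataset))["img"].dtype)  # hoping for: torch.float16
```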
### Environment info
- `datasets` version: 2.18.0
- Platform: Linux-5.15.146.1-microsoft-standard-WSL2-x86_64-with-glibc2.31
- Python version: 3.10.9
- `huggingface_hub` version: 0.21.4
- PyArrow version: 14.0.2
- Pandas version: 2.0.3
- `fsspec` version: 2023.10.0 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6752/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6752/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6751 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6751/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6751/comments | https://api.github.com/repos/huggingface/datasets/issues/6751/events | https://github.com/huggingface/datasets/pull/6751 | 2,203,951,501 | PR_kwDODunzps5qkKLH | 6,751 | Use 'with' operator for some download functions | {
"avatar_url": "https://avatars.githubusercontent.com/u/31669?v=4",
"events_url": "https://api.github.com/users/Moisan/events{/privacy}",
"followers_url": "https://api.github.com/users/Moisan/followers",
"following_url": "https://api.github.com/users/Moisan/following{/other_user}",
"gists_url": "https://api.github.com/users/Moisan/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Moisan",
"id": 31669,
"login": "Moisan",
"node_id": "MDQ6VXNlcjMxNjY5",
"organizations_url": "https://api.github.com/users/Moisan/orgs",
"received_events_url": "https://api.github.com/users/Moisan/received_events",
"repos_url": "https://api.github.com/users/Moisan/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Moisan/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Moisan/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Moisan",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-03-23T16:32:08Z | 2024-03-26T00:40:57Z | 2024-03-26T00:40:57Z | NONE | null | null | null | Some functions in `streaming_download_manager.py` are not closing the file they open which lead to `Unclosed file` warnings in our code. This fixes a few of them. | {
"avatar_url": "https://avatars.githubusercontent.com/u/31669?v=4",
"events_url": "https://api.github.com/users/Moisan/events{/privacy}",
"followers_url": "https://api.github.com/users/Moisan/followers",
"following_url": "https://api.github.com/users/Moisan/following{/other_user}",
"gists_url": "https://api.github.com/users/Moisan/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Moisan",
"id": 31669,
"login": "Moisan",
"node_id": "MDQ6VXNlcjMxNjY5",
"organizations_url": "https://api.github.com/users/Moisan/orgs",
"received_events_url": "https://api.github.com/users/Moisan/received_events",
"repos_url": "https://api.github.com/users/Moisan/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Moisan/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Moisan/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Moisan",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6751/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6751/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6751.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6751",
"merged_at": null,
"patch_url": "https://github.com/huggingface/datasets/pull/6751.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6751"
} |
https://api.github.com/repos/huggingface/datasets/issues/6750 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6750/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6750/comments | https://api.github.com/repos/huggingface/datasets/issues/6750/events | https://github.com/huggingface/datasets/issues/6750 | 2,203,590,658 | I_kwDODunzps6DWCAC | 6,750 | `load_dataset` requires a network connection for local download? | {
"avatar_url": "https://avatars.githubusercontent.com/u/6306695?v=4",
"events_url": "https://api.github.com/users/MiroFurtado/events{/privacy}",
"followers_url": "https://api.github.com/users/MiroFurtado/followers",
"following_url": "https://api.github.com/users/MiroFurtado/following{/other_user}",
"gists_url": "https://api.github.com/users/MiroFurtado/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/MiroFurtado",
"id": 6306695,
"login": "MiroFurtado",
"node_id": "MDQ6VXNlcjYzMDY2OTU=",
"organizations_url": "https://api.github.com/users/MiroFurtado/orgs",
"received_events_url": "https://api.github.com/users/MiroFurtado/received_events",
"repos_url": "https://api.github.com/users/MiroFurtado/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/MiroFurtado/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/MiroFurtado/subscriptions",
"type": "User",
"url": "https://api.github.com/users/MiroFurtado",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-03-23T01:06:32Z | 2024-04-15T15:38:52Z | 2024-04-15T15:38:52Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Hi all - I see that in the past a network dependency has been mistakenly introduced into `load_dataset` even for local loads. Is it possible this has happened again?
### Steps to reproduce the bug
```
>>> import datasets
>>> datasets.load_dataset("hh-rlhf")
Repo card metadata block was not found. Setting CardData to empty.
*hangs bc i'm firewalled*
```
stack trace from ctrl-c:
```
^CTraceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jobuser/.local/lib/python3.10/site-packages/datasets/load.py", line 2582, in load_dataset
builder_instance.download_and_prepare(
output_path = get_from_cache(
File "/home/jobuser/.local/lib/python3.10/site-packages/datasets/utils/file_utils.py", line 532, in get_from_cache
response = http_head(
File "/home/jobuser/.local/lib/python3.10/site-packages/datasets/utils/file_utils.py", line 419, in http_head
response = _request_with_retry(
File "/home/jobuser/.local/lib/python3.10/site-packages/datasets/utils/file_utils.py", line 304, in _request_with_retry
response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/requests/api.py", line 59, in request
return session.request(method=method, url=url, **kwargs)
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/requests/sessions.py", line 587, in request
resp = self.send(prep, **send_kwargs)
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/requests/sessions.py", line 701, in send
r = adapter.send(request, **kwargs)
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/requests/adapters.py", line 487, in send
resp = conn.urlopen(
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connectionpool.py", line 703, in urlopen
httplib_response = self._make_request(
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connectionpool.py", line 386, in _make_request
self._validate_conn(conn)
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connectionpool.py", line 1042, in _validate_conn
conn.connect()
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connection.py", line 363, in connect
self.sock = conn = self._new_conn()
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connection.py", line 174, in _new_conn
conn = connection.create_connection(
File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/util/connection.py", line 85, in create_connection
sock.connect(sa)
KeyboardInterrupt
```
### Expected behavior
loads the dataset
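A hedged workaround for the firewalled case, assuming the dataset files are already in the local cache: force offline mode so no HEAD request is attempted.
```python
import os

os.environ["HF_DATASETS_OFFLINE"] = "1"  # must be set before importing datasets

import datasets

ds = datasets.load_dataset("hh-rlhf")  # should resolve from the local cache without network calls
```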
### Environment info
```
> pip show datasets
Name: datasets
Version: 2.18.0
```
Python 3.10.2 | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 1,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6750/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6750/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6749 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6749/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6749/comments | https://api.github.com/repos/huggingface/datasets/issues/6749/events | https://github.com/huggingface/datasets/pull/6749 | 2,202,310,116 | PR_kwDODunzps5qeoSk | 6,749 | Fix fsspec tqdm callback | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-03-22T11:44:11Z | 2024-03-22T14:51:45Z | 2024-03-22T14:45:39Z | MEMBER | null | null | null | Following changes at https://github.com/fsspec/filesystem_spec/pull/1497 for `fsspec>=2024.2.0` | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6749/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6749/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6749.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6749",
"merged_at": "2024-03-22T14:45:39Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6749.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6749"
} |
https://api.github.com/repos/huggingface/datasets/issues/6748 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6748/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6748/comments | https://api.github.com/repos/huggingface/datasets/issues/6748/events | https://github.com/huggingface/datasets/issues/6748 | 2,201,517,348 | I_kwDODunzps6DOH0k | 6,748 | Strange slicing behavior | {
"avatar_url": "https://avatars.githubusercontent.com/u/20135317?v=4",
"events_url": "https://api.github.com/users/Luciennnnnnn/events{/privacy}",
"followers_url": "https://api.github.com/users/Luciennnnnnn/followers",
"following_url": "https://api.github.com/users/Luciennnnnnn/following{/other_user}",
"gists_url": "https://api.github.com/users/Luciennnnnnn/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/Luciennnnnnn",
"id": 20135317,
"login": "Luciennnnnnn",
"node_id": "MDQ6VXNlcjIwMTM1MzE3",
"organizations_url": "https://api.github.com/users/Luciennnnnnn/orgs",
"received_events_url": "https://api.github.com/users/Luciennnnnnn/received_events",
"repos_url": "https://api.github.com/users/Luciennnnnnn/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/Luciennnnnnn/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Luciennnnnnn/subscriptions",
"type": "User",
"url": "https://api.github.com/users/Luciennnnnnn",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 1 | 2024-03-22T01:49:13Z | 2024-03-22T16:43:57Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
I load a dataset and then slice the first 300 samples using the `:` operator; however, the result is not what I expect, as shown in the output below:
```bash
len(dataset)=1050324
len(dataset[:300])=2
len(dataset[0:300])=2
len(dataset.select(range(300)))=300
```
### Steps to reproduce the bug
load a dataset then:
```python
from datasets import load_from_disk

dataset = load_from_disk(args.train_data_dir)  # args.train_data_dir is defined by the surrounding script
print(f"{len(dataset)=}", flush=True)
print(f"{len(dataset[:300])=}", flush=True)
print(f"{len(dataset[0:300])=}", flush=True)
print(f"{len(dataset.select(range(300)))=}", flush=True)
```
### Expected behavior
```bash
len(dataset)=1050324
len(dataset[:300])=300
len(dataset[0:300])=300
len(dataset.select(range(300)))=300
```
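My reading of the API, offered as an assumption rather than a confirmed diagnosis: slicing a `Dataset` returns a plain dict mapping column names to lists, so `len()` counts columns (2 here), not rows. A small sketch:
```python
batch = dataset[:300]                 # dict: {column_name: list_of_values}
print(list(batch.keys()))             # column names -- len(batch) counts these
first_column = list(batch.keys())[0]  # whichever column happens to come first
print(len(batch[first_column]))       # 300 -- the number of sliced rows
# dataset.select(range(300)) is the call that returns an actual 300-row Dataset.
```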
### Environment info
- `datasets` version: 2.16.1
- Platform: Linux-5.15.0-60-generic-x86_64-with-glibc2.35
- Python version: 3.10.11
- `huggingface_hub` version: 0.20.2
- PyArrow version: 10.0.1
- Pandas version: 1.5.3
- `fsspec` version: 2023.10.0 | null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6748/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6748/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6747 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6747/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6747/comments | https://api.github.com/repos/huggingface/datasets/issues/6747/events | https://github.com/huggingface/datasets/pull/6747 | 2,201,219,384 | PR_kwDODunzps5qa5L- | 6,747 | chore(deps): bump fsspec | {
"avatar_url": "https://avatars.githubusercontent.com/u/3659196?v=4",
"events_url": "https://api.github.com/users/shcheklein/events{/privacy}",
"followers_url": "https://api.github.com/users/shcheklein/followers",
"following_url": "https://api.github.com/users/shcheklein/following{/other_user}",
"gists_url": "https://api.github.com/users/shcheklein/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/shcheklein",
"id": 3659196,
"login": "shcheklein",
"node_id": "MDQ6VXNlcjM2NTkxOTY=",
"organizations_url": "https://api.github.com/users/shcheklein/orgs",
"received_events_url": "https://api.github.com/users/shcheklein/received_events",
"repos_url": "https://api.github.com/users/shcheklein/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/shcheklein/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/shcheklein/subscriptions",
"type": "User",
"url": "https://api.github.com/users/shcheklein",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-03-21T21:25:49Z | 2024-03-22T16:40:15Z | 2024-03-22T16:28:40Z | CONTRIBUTOR | null | null | null | There were a few fixes released recently, some DVC ecosystem packages require newer version of `fsspec`. | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6747/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6747/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6747.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6747",
"merged_at": "2024-03-22T16:28:40Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6747.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6747"
} |
https://api.github.com/repos/huggingface/datasets/issues/6746 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6746/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6746/comments | https://api.github.com/repos/huggingface/datasets/issues/6746/events | https://github.com/huggingface/datasets/issues/6746 | 2,198,993,949 | I_kwDODunzps6DEfwd | 6,746 | ExpectedMoreSplits error when loading C4 dataset | {
"avatar_url": "https://avatars.githubusercontent.com/u/65165345?v=4",
"events_url": "https://api.github.com/users/billwang485/events{/privacy}",
"followers_url": "https://api.github.com/users/billwang485/followers",
"following_url": "https://api.github.com/users/billwang485/following{/other_user}",
"gists_url": "https://api.github.com/users/billwang485/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/billwang485",
"id": 65165345,
"login": "billwang485",
"node_id": "MDQ6VXNlcjY1MTY1MzQ1",
"organizations_url": "https://api.github.com/users/billwang485/orgs",
"received_events_url": "https://api.github.com/users/billwang485/received_events",
"repos_url": "https://api.github.com/users/billwang485/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/billwang485/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/billwang485/subscriptions",
"type": "User",
"url": "https://api.github.com/users/billwang485",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 8 | 2024-03-21T02:53:04Z | 2024-09-18T19:57:14Z | 2024-07-29T07:21:08Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
I encounter a bug when running the example command line:
```bash
python main.py \
--model decapoda-research/llama-7b-hf \
--prune_method wanda \
--sparsity_ratio 0.5 \
--sparsity_type unstructured \
--save out/llama_7b/unstructured/wanda/
```
The bug occurred at these lines of code (when loading the C4 dataset):
```python
traindata = load_dataset('allenai/c4', 'allenai--c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train')
valdata = load_dataset('allenai/c4', 'allenai--c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation')
```
The error message states:
```
raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
datasets.utils.info_utils.ExpectedMoreSplits: {'validation'}
```
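For anyone hitting this while debugging: a possible way to sidestep the split verification (an assumption on my part, not an official fix) is to disable checks when only a subset of the data files is requested:
```python
from datasets import load_dataset

# Same call as above, plus verification_mode="no_checks" so the recorded
# train/validation split expectations are not enforced.
traindata = load_dataset(
    'allenai/c4', 'allenai--c4',
    data_files={'train': 'en/c4-train.00000-of-01024.json.gz'},
    split='train',
    verification_mode="no_checks",
)
```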
### Steps to reproduce the bug
1. I encounter the bug when running the example command line above.
### Expected behavior
The error message states:
```
raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
datasets.utils.info_utils.ExpectedMoreSplits: {'validation'}
```
### Environment info
I'm using CUDA 12.4, so I use ```pip install pytorch``` instead of the conda command provided in install.md.
Also, I've tried another environment set up with the same commands from install.md, but the same bug occurred. | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6746/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6746/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6745 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6745/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6745/comments | https://api.github.com/repos/huggingface/datasets/issues/6745/events | https://github.com/huggingface/datasets/issues/6745 | 2,198,541,732 | I_kwDODunzps6DCxWk | 6,745 | Scraping the whole of github including private repos is bad; kindly stop | {
"avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4",
"events_url": "https://api.github.com/users/ghost/events{/privacy}",
"followers_url": "https://api.github.com/users/ghost/followers",
"following_url": "https://api.github.com/users/ghost/following{/other_user}",
"gists_url": "https://api.github.com/users/ghost/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/ghost",
"id": 10137,
"login": "ghost",
"node_id": "MDQ6VXNlcjEwMTM3",
"organizations_url": "https://api.github.com/users/ghost/orgs",
"received_events_url": "https://api.github.com/users/ghost/received_events",
"repos_url": "https://api.github.com/users/ghost/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghost/subscriptions",
"type": "User",
"url": "https://api.github.com/users/ghost",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | closed | false | null | [] | null | 1 | 2024-03-20T20:54:06Z | 2024-03-21T12:28:04Z | 2024-03-21T10:24:56Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Feature request
https://github.com/bigcode-project/opt-out-v2 - opt out is not consent. kindly quit this ridiculous nonsense.
### Motivation
[EDITED: insults not tolerated]
### Your contribution
[EDITED: insults not tolerated] | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6745/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6745/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6744 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6744/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6744/comments | https://api.github.com/repos/huggingface/datasets/issues/6744/events | https://github.com/huggingface/datasets/issues/6744 | 2,197,910,168 | I_kwDODunzps6DAXKY | 6,744 | Option to disable file locking | {
"avatar_url": "https://avatars.githubusercontent.com/u/35767167?v=4",
"events_url": "https://api.github.com/users/VRehnberg/events{/privacy}",
"followers_url": "https://api.github.com/users/VRehnberg/followers",
"following_url": "https://api.github.com/users/VRehnberg/following{/other_user}",
"gists_url": "https://api.github.com/users/VRehnberg/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/VRehnberg",
"id": 35767167,
"login": "VRehnberg",
"node_id": "MDQ6VXNlcjM1NzY3MTY3",
"organizations_url": "https://api.github.com/users/VRehnberg/orgs",
"received_events_url": "https://api.github.com/users/VRehnberg/received_events",
"repos_url": "https://api.github.com/users/VRehnberg/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/VRehnberg/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/VRehnberg/subscriptions",
"type": "User",
"url": "https://api.github.com/users/VRehnberg",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | open | false | null | [] | null | 0 | 2024-03-20T15:59:45Z | 2024-03-20T15:59:45Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Feature request
Commands such as `load_dataset` create file locks with `filelock.FileLock`. It would be good if there were a way to disable this.
### Motivation
File locking doesn't work on all file systems (in my case, an NFS-mounted Weka file system). If the `cache_dir` contained only small files, it would be possible to point it at local disk and the problem would be solved. However, since `cache_dir` is where both the small info files and the processed datasets are written, this isn't a feasible solution.
Considering https://github.com/huggingface/datasets/issues/6395, I still think this is something that belongs in Hugging Face. Being able to control this per package is valuable: a user might keep their dataset on a file system that doesn't support file locking while still using file locking on local disk to control some other type of access.
### Your contribution
My suggested solution:
```
diff --git a/src/datasets/utils/_filelock.py b/src/datasets/utils/_filelock.py
index 19620e6e..58f41a02 100644
--- a/src/datasets/utils/_filelock.py
+++ b/src/datasets/utils/_filelock.py
@@ -18,11 +18,15 @@
import os
from filelock import FileLock as FileLock_
-from filelock import UnixFileLock
+from filelock import SoftFileLock, UnixFileLock
from filelock import __version__ as _filelock_version
from packaging import version
+if os.getenv('HF_USE_SOFTFILELOCK', 'false').lower() in ('true', '1'):
+ FileLock_ = SoftFileLock
+
+
class FileLock(FileLock_):
"""
A `filelock.FileLock` initializer that handles long paths.
```
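With the patch above applied, usage would look roughly like this (`HF_USE_SOFTFILELOCK` is the variable introduced by the diff, not an existing `datasets` option):
```python
# Sketch only: the environment variable must be set before datasets imports
# its _filelock helper, since the check in the diff runs at import time.
import os
os.environ["HF_USE_SOFTFILELOCK"] = "true"

from datasets import load_dataset

ds = load_dataset("json", data_files="data.json", split="train")  # hypothetical local file
```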
| null | {
"+1": 11,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 11,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6744/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6744/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6743 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6743/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6743/comments | https://api.github.com/repos/huggingface/datasets/issues/6743/events | https://github.com/huggingface/datasets/pull/6743 | 2,195,481,697 | PR_kwDODunzps5qHeMZ | 6,743 | Allow null values in dict columns | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-03-19T16:54:22Z | 2024-04-08T13:08:42Z | 2024-03-19T20:05:19Z | COLLABORATOR | null | null | null | Fix #6738 | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6743/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6743/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6743.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6743",
"merged_at": "2024-03-19T20:05:19Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6743.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6743"
} |
https://api.github.com/repos/huggingface/datasets/issues/6742 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6742/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6742/comments | https://api.github.com/repos/huggingface/datasets/issues/6742/events | https://github.com/huggingface/datasets/pull/6742 | 2,195,134,854 | PR_kwDODunzps5qGSfG | 6,742 | Fix missing download_config in get_data_patterns | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-03-19T14:29:25Z | 2024-03-19T18:24:39Z | 2024-03-19T18:15:13Z | MEMBER | null | null | null | Reported in https://github.com/huggingface/datasets-server/issues/2607 | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6742/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6742/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6742.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6742",
"merged_at": "2024-03-19T18:15:13Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6742.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6742"
} |
https://api.github.com/repos/huggingface/datasets/issues/6741 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6741/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6741/comments | https://api.github.com/repos/huggingface/datasets/issues/6741/events | https://github.com/huggingface/datasets/pull/6741 | 2,194,626,108 | PR_kwDODunzps5qEiu3 | 6,741 | Fix offline mode with single config | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-03-19T10:48:32Z | 2024-03-25T16:35:21Z | 2024-03-25T16:23:59Z | MEMBER | null | null | null | Reported in https://github.com/huggingface/datasets/issues/4760
The cache was not able to reload a dataset with a single config from the cache if the config name was not specified.
For example:
```python
from datasets import load_dataset, config
config.HF_DATASETS_OFFLINE = True
load_dataset("openai_humaneval")
```
This was due to a regression in https://github.com/huggingface/datasets/pull/6632 | {
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/lhoestq",
"id": 42851186,
"login": "lhoestq",
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"type": "User",
"url": "https://api.github.com/users/lhoestq",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6741/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6741/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6741.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6741",
"merged_at": "2024-03-25T16:23:59Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6741.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6741"
} |
https://api.github.com/repos/huggingface/datasets/issues/6740 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6740/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6740/comments | https://api.github.com/repos/huggingface/datasets/issues/6740/events | https://github.com/huggingface/datasets/issues/6740 | 2,193,172,074 | I_kwDODunzps6CuSZq | 6,740 | Support for loading geotiff files as a part of the ImageFolder | {
"avatar_url": "https://avatars.githubusercontent.com/u/31362090?v=4",
"events_url": "https://api.github.com/users/sunny1401/events{/privacy}",
"followers_url": "https://api.github.com/users/sunny1401/followers",
"following_url": "https://api.github.com/users/sunny1401/following{/other_user}",
"gists_url": "https://api.github.com/users/sunny1401/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/sunny1401",
"id": 31362090,
"login": "sunny1401",
"node_id": "MDQ6VXNlcjMxMzYyMDkw",
"organizations_url": "https://api.github.com/users/sunny1401/orgs",
"received_events_url": "https://api.github.com/users/sunny1401/received_events",
"repos_url": "https://api.github.com/users/sunny1401/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/sunny1401/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sunny1401/subscriptions",
"type": "User",
"url": "https://api.github.com/users/sunny1401",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | closed | false | null | [] | null | 0 | 2024-03-18T20:00:39Z | 2024-03-27T18:19:48Z | 2024-03-27T18:19:20Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Feature request
Request to add rasterio support for loading GeoTIFF files as part of ImageFolder, instead of using PIL.
### Motivation
As of now, there are many datasets on the Hugging Face Hub that are predominantly focused on, or come from, remote sensing. The current ImageFolder (if I have understood correctly) uses PIL. This is not really optimal, because these datasets mostly have images with many channels plus additional metadata, and using PIL makes one lose them unless we provide a custom script. Hence, maybe an API could be added to handle this in a common way?
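For illustration, this is roughly what reading a multi-band GeoTIFF looks like with rasterio today (the file name is hypothetical):
```python
# Minimal sketch; assumes rasterio is installed and "tile.tif" is a local GeoTIFF.
import rasterio

with rasterio.open("tile.tif") as src:
    bands = src.read()  # numpy array of shape (band_count, height, width)
    meta = src.meta     # geospatial metadata: CRS, transform, dtype, ...

print(bands.shape, meta["crs"])
```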
### Your contribution
If the issue is accepted, I can contribute the code, because I would like to have it automated and generalised. | {
"avatar_url": "https://avatars.githubusercontent.com/u/31362090?v=4",
"events_url": "https://api.github.com/users/sunny1401/events{/privacy}",
"followers_url": "https://api.github.com/users/sunny1401/followers",
"following_url": "https://api.github.com/users/sunny1401/following{/other_user}",
"gists_url": "https://api.github.com/users/sunny1401/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/sunny1401",
"id": 31362090,
"login": "sunny1401",
"node_id": "MDQ6VXNlcjMxMzYyMDkw",
"organizations_url": "https://api.github.com/users/sunny1401/orgs",
"received_events_url": "https://api.github.com/users/sunny1401/received_events",
"repos_url": "https://api.github.com/users/sunny1401/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/sunny1401/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sunny1401/subscriptions",
"type": "User",
"url": "https://api.github.com/users/sunny1401",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6740/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6740/timeline | null | not_planned | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6739 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6739/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6739/comments | https://api.github.com/repos/huggingface/datasets/issues/6739/events | https://github.com/huggingface/datasets/pull/6739 | 2,192,730,134 | PR_kwDODunzps5p-Bwe | 6,739 | Transpose images with EXIF Orientation tag | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 3 | 2024-03-18T16:43:06Z | 2025-07-03T11:33:18Z | 2024-03-19T15:29:42Z | COLLABORATOR | null | null | null | Closes https://github.com/huggingface/datasets/issues/6252 | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6739/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6739/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6739.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6739",
"merged_at": "2024-03-19T15:29:41Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6739.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6739"
} |
https://api.github.com/repos/huggingface/datasets/issues/6738 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6738/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6738/comments | https://api.github.com/repos/huggingface/datasets/issues/6738/events | https://github.com/huggingface/datasets/issues/6738 | 2,192,386,536 | I_kwDODunzps6CrSno | 6,738 | Dict feature is non-nullable while nested dict feature is | {
"avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4",
"events_url": "https://api.github.com/users/polinaeterna/events{/privacy}",
"followers_url": "https://api.github.com/users/polinaeterna/followers",
"following_url": "https://api.github.com/users/polinaeterna/following{/other_user}",
"gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/polinaeterna",
"id": 16348744,
"login": "polinaeterna",
"node_id": "MDQ6VXNlcjE2MzQ4NzQ0",
"organizations_url": "https://api.github.com/users/polinaeterna/orgs",
"received_events_url": "https://api.github.com/users/polinaeterna/received_events",
"repos_url": "https://api.github.com/users/polinaeterna/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions",
"type": "User",
"url": "https://api.github.com/users/polinaeterna",
"user_view_type": "public"
} | [
{
"color": "d73a4a",
"default": true,
"description": "Something isn't working",
"id": 1935892857,
"name": "bug",
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug"
}
] | closed | false | null | [] | null | 3 | 2024-03-18T14:31:47Z | 2024-03-20T10:24:15Z | 2024-03-19T20:05:20Z | CONTRIBUTOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | When I try to create a `Dataset` object with None values inside a dict column, like this:
```python
from datasets import Dataset, Features, Value
Dataset.from_dict(
{
"dict": [{"a": 0, "b": 0}, None],
}, features=Features(
{"dict": {"a": Value("int16"), "b": Value("int16")}}
)
)
```
I get `ValueError: Got None but expected a dictionary instead`.
At the same time, having None in a _nested_ dict feature works; for example, this doesn't throw any errors:
```python
from datasets import Dataset, Features, Value, Sequence
dataset = Dataset.from_dict(
{
"list_dict": [[{"a": 0, "b": 0}], None],
"sequence_dict": [[{"a": 0, "b": 0}], None],
}, features=Features({
"list_dict": [{"a": Value("int16"), "b": Value("int16")}],
"sequence_dict": Sequence({"a": Value("int16"), "b": Value("int16")}),
})
)
```
Other types of features also seem to be nullable (but I haven't checked all of them).
The version of `datasets` is the latest at the moment (2.18.0).
Is this expected behavior or a bug? | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6738/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6738/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6737 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6737/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6737/comments | https://api.github.com/repos/huggingface/datasets/issues/6737/events | https://github.com/huggingface/datasets/issues/6737 | 2,190,198,425 | I_kwDODunzps6Ci8aZ | 6,737 | Invalid pattern: '**' can only be an entire path component | {
"avatar_url": "https://avatars.githubusercontent.com/u/28976175?v=4",
"events_url": "https://api.github.com/users/JPonsa/events{/privacy}",
"followers_url": "https://api.github.com/users/JPonsa/followers",
"following_url": "https://api.github.com/users/JPonsa/following{/other_user}",
"gists_url": "https://api.github.com/users/JPonsa/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/JPonsa",
"id": 28976175,
"login": "JPonsa",
"node_id": "MDQ6VXNlcjI4OTc2MTc1",
"organizations_url": "https://api.github.com/users/JPonsa/orgs",
"received_events_url": "https://api.github.com/users/JPonsa/received_events",
"repos_url": "https://api.github.com/users/JPonsa/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/JPonsa/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JPonsa/subscriptions",
"type": "User",
"url": "https://api.github.com/users/JPonsa",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 7 | 2024-03-16T19:28:46Z | 2024-07-23T14:23:28Z | 2024-05-13T11:32:57Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
`ValueError: Invalid pattern: '**' can only be an entire path component` when loading any dataset.
### Steps to reproduce the bug
import datasets
ds = datasets.load_dataset("TokenBender/code_instructions_122k_alpaca_style")
### Expected behavior
loading the dataset successfully
### Environment info
- `datasets` version: 2.18.0
- Platform: Windows-10-10.0.22631-SP0
- Python version: 3.11.7
- `huggingface_hub` version: 0.20.3
- PyArrow version: 15.0.0
- Pandas version: 2.2.1
- `fsspec` version: 2023.12.2 | {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 9,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 9,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6737/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6737/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6736 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6736/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6736/comments | https://api.github.com/repos/huggingface/datasets/issues/6736/events | https://github.com/huggingface/datasets/issues/6736 | 2,190,181,422 | I_kwDODunzps6Ci4Qu | 6,736 | Mosaic Streaming (MDS) Support | {
"avatar_url": "https://avatars.githubusercontent.com/u/2498509?v=4",
"events_url": "https://api.github.com/users/siddk/events{/privacy}",
"followers_url": "https://api.github.com/users/siddk/followers",
"following_url": "https://api.github.com/users/siddk/following{/other_user}",
"gists_url": "https://api.github.com/users/siddk/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/siddk",
"id": 2498509,
"login": "siddk",
"node_id": "MDQ6VXNlcjI0OTg1MDk=",
"organizations_url": "https://api.github.com/users/siddk/orgs",
"received_events_url": "https://api.github.com/users/siddk/received_events",
"repos_url": "https://api.github.com/users/siddk/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/siddk/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/siddk/subscriptions",
"type": "User",
"url": "https://api.github.com/users/siddk",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
}
] | open | false | null | [] | null | 1 | 2024-03-16T18:42:04Z | 2024-03-18T15:13:34Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Feature request
I'm a huge fan of the current HF Datasets `webdataset` integration (especially the built-in streaming support). However, I'd love to upload some robotics and multimodal datasets I've processed for use with [Mosaic Streaming](https://docs.mosaicml.com/projects/streaming/en/stable/), specifically their [MDS Format](https://docs.mosaicml.com/projects/streaming/en/stable/fundamentals/dataset_format.html#mds).
Because the shard files have similar semantics to WebDataset, I'm hoping that adding such support won't be too much trouble?
### Motivation
One of the downsides with WebDataset is a lack of out-of-the-box determinism (especially for large-scale training and reproducibility), easy job resumption, and the ability to quickly debug / visualize individual examples.
Mosaic Streaming provides a [great interface for this out of the box](https://docs.mosaicml.com/projects/streaming/en/stable/#key-features), so I'd love to see it supported in HF Datasets.
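For context, writing and reading MDS shards with MosaicML's `streaming` package looks roughly like this (a sketch from memory of their docs; the column encodings and constructor arguments are assumptions and may differ):
```python
# Rough, untested sketch of the MDS workflow such an integration would wrap.
from streaming import MDSWriter, StreamingDataset

columns = {"image": "bytes", "caption": "str"}  # assumed encodings
with MDSWriter(out="mds-out", columns=columns, compression="zstd") as writer:
    writer.write({"image": b"\x00\x01", "caption": "an example"})

ds = StreamingDataset(local="mds-cache", remote="s3://my-bucket/mds-out", shuffle=True)
for sample in ds:
    print(sample["caption"])
```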
### Your contribution
Happy to help test things / provide example data. Can potentially submit a PR if maintainers could point me to the necessary WebDataset logic / steps for adding a new streaming format! | null | {
"+1": 1,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 1,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6736/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6736/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6735 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6735/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6735/comments | https://api.github.com/repos/huggingface/datasets/issues/6735/events | https://github.com/huggingface/datasets/pull/6735 | 2,189,132,932 | PR_kwDODunzps5px84g | 6,735 | Add `mode` parameter to `Image` feature | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-03-15T17:21:12Z | 2024-03-18T15:47:48Z | 2024-03-18T15:41:33Z | COLLABORATOR | null | null | null | Fix https://github.com/huggingface/datasets/issues/6675 | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6735/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6735/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6735.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6735",
"merged_at": "2024-03-18T15:41:33Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6735.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6735"
} |
https://api.github.com/repos/huggingface/datasets/issues/6734 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6734/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6734/comments | https://api.github.com/repos/huggingface/datasets/issues/6734/events | https://github.com/huggingface/datasets/issues/6734 | 2,187,646,694 | I_kwDODunzps6CZNbm | 6,734 | Tokenization slows towards end of dataset | {
"avatar_url": "https://avatars.githubusercontent.com/u/98723285?v=4",
"events_url": "https://api.github.com/users/ethansmith2000/events{/privacy}",
"followers_url": "https://api.github.com/users/ethansmith2000/followers",
"following_url": "https://api.github.com/users/ethansmith2000/following{/other_user}",
"gists_url": "https://api.github.com/users/ethansmith2000/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/ethansmith2000",
"id": 98723285,
"login": "ethansmith2000",
"node_id": "U_kgDOBeJl1Q",
"organizations_url": "https://api.github.com/users/ethansmith2000/orgs",
"received_events_url": "https://api.github.com/users/ethansmith2000/received_events",
"repos_url": "https://api.github.com/users/ethansmith2000/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/ethansmith2000/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ethansmith2000/subscriptions",
"type": "User",
"url": "https://api.github.com/users/ethansmith2000",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 4 | 2024-03-15T03:27:36Z | 2025-02-20T17:40:54Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
Mapped tokenization slows down substantially towards the end of the dataset.
The train set started off very slow, caught up around 20k, then tapered off towards the end.
What's particularly strange is that the tokenization crashed a few times before, due to errors with invalid tokens somewhere or corrupted downloads, and the speed-ups/slow-downs consistently happened at the same points.
```bash
Running tokenizer on dataset (num_proc=48): 0%| | 847000/881416735 [12:18<252:45:45, 967.72 examples/s]
Running tokenizer on dataset (num_proc=48): 0%| | 848000/881416735 [12:19<224:16:10, 1090.66 examples/s]
Running tokenizer on dataset (num_proc=48): 10%|β | 84964000/881416735 [3:48:00<11:21:34, 19476.01 examples/s]
Running tokenizer on dataset (num_proc=48): 10%|β | 84967000/881416735 [3:48:00<12:04:01, 18333.79 examples/s]
Running tokenizer on dataset (num_proc=48): 61%|ββββββ | 538631977/881416735 [13:46:40<27:50:04, 3420.84 examples/s]
Running tokenizer on dataset (num_proc=48): 61%|ββββββ | 538632977/881416735 [13:46:40<23:48:20, 3999.77 examples/s]
Running tokenizer on dataset (num_proc=48): 100%|ββββββββββ| 881365886/881416735 [38:30:19<04:34, 185.10 examples/s]
Running tokenizer on dataset (num_proc=48): 100%|ββββββββββ| 881366886/881416735 [38:30:25<04:36, 180.57 examples/s]
```
and validation set as well
```bash
Running tokenizer on dataset (num_proc=48): 90%|βββββββββ | 41544000/46390354 [28:44<02:37, 30798.76 examples/s]
Running tokenizer on dataset (num_proc=48): 90%|βββββββββ | 41550000/46390354 [28:44<02:08, 37698.08 examples/s]
Running tokenizer on dataset (num_proc=48): 96%|ββββββββββ| 44747422/46390354 [2:15:48<12:22:44, 36.87 examples/s]
Running tokenizer on dataset (num_proc=48): 96%|ββββββββββ| 44747422/46390354 [2:16:00<12:22:44, 36.87 examples/s]
```
### Steps to reproduce the bug
using the following kwargs
```python
with accelerator.main_process_first():
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
        num_proc=48,
load_from_cache_file=True,
desc=f"Grouping texts in chunks of {block_size}",
)
```
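(`group_texts` isn't shown in the issue; it is presumably the usual "concatenate then chunk" helper from the HF language-modeling examples, roughly like the hypothetical stand-in below.)
```python
# Hypothetical stand-in for the group_texts referenced above; block_size is assumed.
block_size = 2048

def group_texts(examples):
    # Concatenate all token lists, then split them into fixed-size blocks.
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated[list(examples.keys())[0]])
    total_length = (total_length // block_size) * block_size
    return {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated.items()
    }
```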
running through a Slurm script
```bash
#SBATCH --partition=gpu-nvidia-a100
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --gpus-per-task=8
#SBATCH --cpus-per-task=96
```
using this dataset https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T
### Expected behavior
Constant speed throughout
### Environment info
- `datasets` version: 2.15.0
- Platform: Linux-5.15.0-1049-aws-x86_64-with-glibc2.10
- Python version: 3.8.18
- `huggingface_hub` version: 0.19.4
- PyArrow version: 14.0.1
- Pandas version: 2.0.3
- `fsspec` version: 2023.10.0 | null | {
"+1": 2,
"-1": 0,
"confused": 0,
"eyes": 1,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 3,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6734/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6734/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6733 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6733/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6733/comments | https://api.github.com/repos/huggingface/datasets/issues/6733/events | https://github.com/huggingface/datasets/issues/6733 | 2,186,811,724 | I_kwDODunzps6CWBlM | 6,733 | EmptyDatasetError when loading dataset downloaded with HuggingFace cli | {
"avatar_url": "https://avatars.githubusercontent.com/u/77196999?v=4",
"events_url": "https://api.github.com/users/StwayneXG/events{/privacy}",
"followers_url": "https://api.github.com/users/StwayneXG/followers",
"following_url": "https://api.github.com/users/StwayneXG/following{/other_user}",
"gists_url": "https://api.github.com/users/StwayneXG/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/StwayneXG",
"id": 77196999,
"login": "StwayneXG",
"node_id": "MDQ6VXNlcjc3MTk2OTk5",
"organizations_url": "https://api.github.com/users/StwayneXG/orgs",
"received_events_url": "https://api.github.com/users/StwayneXG/received_events",
"repos_url": "https://api.github.com/users/StwayneXG/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/StwayneXG/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/StwayneXG/subscriptions",
"type": "User",
"url": "https://api.github.com/users/StwayneXG",
"user_view_type": "public"
} | [] | open | false | null | [] | null | 1 | 2024-03-14T16:41:27Z | 2024-03-15T18:09:02Z | null | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
I am using a cluster whose nodes do not have internet access while a job is running. I tried downloading the dataset using the huggingface-cli command and then loading it with `load_dataset`, but I get an error:
```raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None```
The dataset I'm using is "lmsys/chatbot_arena_conversations". The folder structure is
- README.md
- data/
  - train-00000-of-00001-cced8514c7ed782a.parquet
### Steps to reproduce the bug
1. Download dataset using HuggingFace CLI: ```huggingface-cli download lmsys/chatbot_arena_conversations --local-dir ./lmsys/chatbot_arena_conversations```
2. In Python
```
from datasets import load_dataset
load_dataset("lmsys/chatbot_arena_conversations")
```
### Expected behavior
Should return a Dataset Dict in the form of
```
DatasetDict({
train: Dataset({
features: [...],
num_rows: 33,000
})
})
```
### Environment info
Python 3.11.5
Datasets 2.18.0
Transformers 4.38.2
Pytorch 2.2.0
Pyarrow 15.0.1
Rocky Linux release 8.9 (Green Obsidian)
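A possible offline workaround given the folder layout above (an assumption, not an official fix) is to load the downloaded parquet file directly instead of resolving the repo name on the Hub:
```python
from datasets import load_dataset

# Paths follow the --local-dir used in the reproduction steps.
ds = load_dataset(
    "parquet",
    data_files={"train": "./lmsys/chatbot_arena_conversations/data/*.parquet"},
)
print(ds)
```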
| null | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6733/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6733/timeline | null | null | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6731 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6731/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6731/comments | https://api.github.com/repos/huggingface/datasets/issues/6731/events | https://github.com/huggingface/datasets/issues/6731 | 2,182,844,673 | I_kwDODunzps6CG5EB | 6,731 | Unexpected behavior when using load_dataset with streaming=True in a for loop | {
"avatar_url": "https://avatars.githubusercontent.com/u/42908296?v=4",
"events_url": "https://api.github.com/users/uApiv/events{/privacy}",
"followers_url": "https://api.github.com/users/uApiv/followers",
"following_url": "https://api.github.com/users/uApiv/following{/other_user}",
"gists_url": "https://api.github.com/users/uApiv/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/uApiv",
"id": 42908296,
"login": "uApiv",
"node_id": "MDQ6VXNlcjQyOTA4Mjk2",
"organizations_url": "https://api.github.com/users/uApiv/orgs",
"received_events_url": "https://api.github.com/users/uApiv/received_events",
"repos_url": "https://api.github.com/users/uApiv/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/uApiv/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/uApiv/subscriptions",
"type": "User",
"url": "https://api.github.com/users/uApiv",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-03-12T23:26:43Z | 2024-04-16T00:00:00Z | 2024-04-16T00:00:00Z | NONE | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | ### Describe the bug
### My Code
```
from datasets import load_dataset
res=[]
for i in [0,1]:
di=load_dataset(
"json",
data_files='path_to.json',
split='train',
streaming=True,
).map(lambda x: {"source": i})
res.append(di)
for e in res[0]:
print(e)
```
### Unexpected Behavior
Data in `res[0]` has `source=1`. However, the expected value is 0.
### FYI
I further switched `streaming` to `False`, and the output value is as expected (0). So there may be a bug in how `streaming=True` interacts with the for loop.
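A likely explanation (my assumption, not confirmed): with `streaming=True`, `map` is applied lazily at iteration time, and the lambda closes over the loop variable `i` by reference, so both datasets see its final value `1`. A sketch of a workaround that binds the current value via a default argument:
```python
from datasets import load_dataset

res = []
for i in [0, 1]:
    di = load_dataset(
        "json",
        data_files="path_to.json",  # same hypothetical file as above
        split="train",
        streaming=True,
    ).map(lambda x, source=i: {"source": source})  # bind i now, not at iteration time
    res.append(di)

for e in res[0]:
    print(e)  # "source" should now be 0
```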
### Environment
Python 3.8.0
datasets==2.18.0
transformers==4.28.1
### Steps to reproduce the bug
1. Create a Json file with any content.
2. Run the provided code.
3. Switch `streaming` to `False` and run again to see the expected behavior.
### Expected behavior
The expected behavior is that each dataset is mapped with its corresponding value of `i` from the for loop.
### Environment info
Python 3.8.0
datasets==2.18.0
transformers==4.28.1
Ubuntu 20.04 | {
"avatar_url": "https://avatars.githubusercontent.com/u/42908296?v=4",
"events_url": "https://api.github.com/users/uApiv/events{/privacy}",
"followers_url": "https://api.github.com/users/uApiv/followers",
"following_url": "https://api.github.com/users/uApiv/following{/other_user}",
"gists_url": "https://api.github.com/users/uApiv/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/uApiv",
"id": 42908296,
"login": "uApiv",
"node_id": "MDQ6VXNlcjQyOTA4Mjk2",
"organizations_url": "https://api.github.com/users/uApiv/orgs",
"received_events_url": "https://api.github.com/users/uApiv/received_events",
"repos_url": "https://api.github.com/users/uApiv/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/uApiv/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/uApiv/subscriptions",
"type": "User",
"url": "https://api.github.com/users/uApiv",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6731/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6731/timeline | null | completed | null | null |
https://api.github.com/repos/huggingface/datasets/issues/6730 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6730/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6730/comments | https://api.github.com/repos/huggingface/datasets/issues/6730/events | https://github.com/huggingface/datasets/pull/6730 | 2,181,881,499 | PR_kwDODunzps5pZDsB | 6,730 | Deprecate Pandas builder | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | [] | closed | false | null | [] | null | 2 | 2024-03-12T15:12:13Z | 2024-03-12T17:42:33Z | 2024-03-12T17:36:24Z | COLLABORATOR | null | null | null | The Pandas packaged builder is undocumented and relies on `pickle` to read the data, making it **unsafe**. Moreover, I haven't seen a single instance of this builder being used (not even using the GH/Hub search), so we should deprecate it. | {
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/mariosasko",
"id": 47462742,
"login": "mariosasko",
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"type": "User",
"url": "https://api.github.com/users/mariosasko",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6730/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6730/timeline | null | null | false | {
"diff_url": "https://github.com/huggingface/datasets/pull/6730.diff",
"html_url": "https://github.com/huggingface/datasets/pull/6730",
"merged_at": "2024-03-12T17:36:24Z",
"patch_url": "https://github.com/huggingface/datasets/pull/6730.patch",
"url": "https://api.github.com/repos/huggingface/datasets/pulls/6730"
} |
https://api.github.com/repos/huggingface/datasets/issues/6729 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/6729/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/6729/comments | https://api.github.com/repos/huggingface/datasets/issues/6729/events | https://github.com/huggingface/datasets/issues/6729 | 2,180,237,159 | I_kwDODunzps6B88dn | 6,729 | Support zipfiles that span multiple disks? | {
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/severo",
"id": 1676121,
"login": "severo",
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"organizations_url": "https://api.github.com/users/severo/orgs",
"received_events_url": "https://api.github.com/users/severo/received_events",
"repos_url": "https://api.github.com/users/severo/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"type": "User",
"url": "https://api.github.com/users/severo",
"user_view_type": "public"
} | [
{
"color": "a2eeef",
"default": true,
"description": "New feature or request",
"id": 1935892871,
"name": "enhancement",
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement"
},
{
"color": "d876e3",
"default": true,
"description": "Further information is requested",
"id": 1935892912,
"name": "question",
"node_id": "MDU6TGFiZWwxOTM1ODkyOTEy",
"url": "https://api.github.com/repos/huggingface/datasets/labels/question"
}
] | closed | false | null | [] | null | 6 | 2024-03-11T21:07:41Z | 2024-06-26T05:08:59Z | 2024-06-26T05:05:28Z | COLLABORATOR | null | null | {
"completed": 0,
"percent_completed": 0,
"total": 0
} | See https://huggingface.co/datasets/PhilEO-community/PhilEO-downstream
The dataset viewer gives the following error:
```
Error code: ConfigNamesError
Exception: BadZipFile
Message: zipfiles that span multiple disks are not supported
Traceback: Traceback (most recent call last):
File "/src/services/worker/src/worker/job_runners/dataset/config_names.py", line 67, in compute_config_names_response
get_dataset_config_names(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/inspect.py", line 347, in get_dataset_config_names
dataset_module = dataset_module_factory(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 1871, in dataset_module_factory
raise e1 from None
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 1846, in dataset_module_factory
return HubDatasetModuleFactoryWithoutScript(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 1240, in get_module
module_name, default_builder_kwargs = infer_module_for_data_files(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 584, in infer_module_for_data_files
split_modules = {
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 585, in <dictcomp>
split: infer_module_for_data_files_list(data_files_list, download_config=download_config)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 526, in infer_module_for_data_files_list
return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 554, in infer_module_for_data_files_list_in_archives
for f in xglob(extracted, recursive=True, download_config=download_config)[
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py", line 576, in xglob
fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
File "/src/services/worker/.venv/lib/python3.9/site-packages/fsspec/core.py", line 622, in get_fs_token_paths
fs = filesystem(protocol, **inkwargs)
File "/src/services/worker/.venv/lib/python3.9/site-packages/fsspec/registry.py", line 290, in filesystem
return cls(**storage_options)
File "/src/services/worker/.venv/lib/python3.9/site-packages/fsspec/spec.py", line 79, in __call__
obj = super().__call__(*args, **kwargs)
File "/src/services/worker/.venv/lib/python3.9/site-packages/fsspec/implementations/zip.py", line 57, in __init__
self.zip = zipfile.ZipFile(
File "/usr/local/lib/python3.9/zipfile.py", line 1266, in __init__
self._RealGetContents()
File "/usr/local/lib/python3.9/zipfile.py", line 1329, in _RealGetContents
endrec = _EndRecData(fp)
File "/usr/local/lib/python3.9/zipfile.py", line 286, in _EndRecData
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
File "/usr/local/lib/python3.9/zipfile.py", line 232, in _EndRecData64
raise BadZipFile("zipfiles that span multiple disks are not supported")
zipfile.BadZipFile: zipfiles that span multiple disks are not supported
```
The files (https://huggingface.co/datasets/PhilEO-community/PhilEO-downstream/tree/main/data) are:
<img width="629" alt="Capture dβeΜcran 2024-03-11 aΜ 22 07 30" src="https://github.com/huggingface/datasets/assets/1676121/0bb15a51-d54f-4d73-8572-e427ea644b36">
| {
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/albertvillanova",
"id": 8515462,
"login": "albertvillanova",
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"type": "User",
"url": "https://api.github.com/users/albertvillanova",
"user_view_type": "public"
} | {
"+1": 0,
"-1": 0,
"confused": 0,
"eyes": 0,
"heart": 0,
"hooray": 0,
"laugh": 0,
"rocket": 0,
"total_count": 0,
"url": "https://api.github.com/repos/huggingface/datasets/issues/6729/reactions"
} | https://api.github.com/repos/huggingface/datasets/issues/6729/timeline | null | not_planned | null | null |