url: string (lengths 61–61)
repository_url: string (1 value)
labels_url: string (lengths 75–75)
comments_url: string (lengths 70–70)
events_url: string (lengths 68–68)
html_url: string (lengths 49–51)
id: int64 (1.08B–1.73B)
node_id: string (lengths 18–19)
number: int64 (3.45k–5.9k)
title: string (lengths 1–290)
user: dict
labels: list
state: string (2 values)
locked: bool (1 class)
assignee: dict
assignees: list
milestone: dict
comments: sequence
created_at: timestamp[s]
updated_at: timestamp[s]
closed_at: timestamp[s]
author_association: string (3 values)
active_lock_reason: null
draft: bool (2 classes)
pull_request: dict
body: string (lengths 2–36.2k)
reactions: dict
timeline_url: string (lengths 70–70)
performed_via_github_app: null
state_reason: string (3 values)
is_pull_request: bool (2 classes)
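The records below follow this schema, one field per line and in the same order. As a minimal sketch of how such an export could be loaded and inspected with the `datasets` library (the file name `issues.jsonl` is an assumption, not part of this export):

```python
from datasets import load_dataset

# Hypothetical local copy of the records below; the file name is an assumption.
ds = load_dataset("json", data_files="issues.jsonl", split="train")

print(ds.features)      # column names and types, matching the schema above
print(ds.num_rows)      # number of issue/PR records
print(ds[0]["title"])   # e.g. "Fix: wmt datasets - fix CWMT zh subsets"
```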
https://api.github.com/repos/huggingface/datasets/issues/4871
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4871/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4871/comments
https://api.github.com/repos/huggingface/datasets/issues/4871/events
https://github.com/huggingface/datasets/pull/4871
1,346,703,568
PR_kwDODunzps49k9Rm
4,871
Fix: wmt datasets - fix CWMT zh subsets
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4871). All of your documentation changes will be reflected on that endpoint." ]
2022-08-22T16:42:09
2022-08-23T10:00:20
2022-08-23T10:00:19
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4871", "html_url": "https://github.com/huggingface/datasets/pull/4871", "diff_url": "https://github.com/huggingface/datasets/pull/4871.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4871.patch", "merged_at": "2022-08-23T10:00:19" }
Fix https://github.com/huggingface/datasets/issues/4575 TODO: run `datasets-cli test`: - [x] wmt17 - [x] wmt18 - [x] wmt19
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4871/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4871/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4870
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4870/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4870/comments
https://api.github.com/repos/huggingface/datasets/issues/4870/events
https://github.com/huggingface/datasets/pull/4870
1,346,160,498
PR_kwDODunzps49jGxD
4,870
audio folder check CI
{ "login": "polinaeterna", "id": 16348744, "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "gravatar_id": "", "url": "https://api.github.com/users/polinaeterna", "html_url": "https://github.com/polinaeterna", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "repos_url": "https://api.github.com/users/polinaeterna/repos", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-22T10:15:53
2022-11-02T11:54:35
2022-08-22T12:19:40
CONTRIBUTOR
null
true
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4870", "html_url": "https://github.com/huggingface/datasets/pull/4870", "diff_url": "https://github.com/huggingface/datasets/pull/4870.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4870.patch", "merged_at": null }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4870/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4870/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4869
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4869/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4869/comments
https://api.github.com/repos/huggingface/datasets/issues/4869/events
https://github.com/huggingface/datasets/pull/4869
1,345,513,758
PR_kwDODunzps49hBGY
4,869
Fix typos in documentation
{ "login": "fl-lo", "id": 85993954, "node_id": "MDQ6VXNlcjg1OTkzOTU0", "avatar_url": "https://avatars.githubusercontent.com/u/85993954?v=4", "gravatar_id": "", "url": "https://api.github.com/users/fl-lo", "html_url": "https://github.com/fl-lo", "followers_url": "https://api.github.com/users/fl-lo/followers", "following_url": "https://api.github.com/users/fl-lo/following{/other_user}", "gists_url": "https://api.github.com/users/fl-lo/gists{/gist_id}", "starred_url": "https://api.github.com/users/fl-lo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fl-lo/subscriptions", "organizations_url": "https://api.github.com/users/fl-lo/orgs", "repos_url": "https://api.github.com/users/fl-lo/repos", "events_url": "https://api.github.com/users/fl-lo/events{/privacy}", "received_events_url": "https://api.github.com/users/fl-lo/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-21T15:10:03
2022-08-22T09:25:39
2022-08-22T09:09:58
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4869", "html_url": "https://github.com/huggingface/datasets/pull/4869", "diff_url": "https://github.com/huggingface/datasets/pull/4869.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4869.patch", "merged_at": "2022-08-22T09:09:58" }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4869/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4869/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4868
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4868/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4868/comments
https://api.github.com/repos/huggingface/datasets/issues/4868/events
https://github.com/huggingface/datasets/pull/4868
1,345,191,322
PR_kwDODunzps49gBk0
4,868
adding mafand to datasets
{ "login": "dadelani", "id": 23586676, "node_id": "MDQ6VXNlcjIzNTg2Njc2", "avatar_url": "https://avatars.githubusercontent.com/u/23586676?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dadelani", "html_url": "https://github.com/dadelani", "followers_url": "https://api.github.com/users/dadelani/followers", "following_url": "https://api.github.com/users/dadelani/following{/other_user}", "gists_url": "https://api.github.com/users/dadelani/gists{/gist_id}", "starred_url": "https://api.github.com/users/dadelani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dadelani/subscriptions", "organizations_url": "https://api.github.com/users/dadelani/orgs", "repos_url": "https://api.github.com/users/dadelani/repos", "events_url": "https://api.github.com/users/dadelani/events{/privacy}", "received_events_url": "https://api.github.com/users/dadelani/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892913, "node_id": "MDU6TGFiZWwxOTM1ODkyOTEz", "url": "https://api.github.com/repos/huggingface/datasets/labels/wontfix", "name": "wontfix", "color": "ffffff", "default": true, "description": "This will not be worked on" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Hi @dadelani, thanks for your awesome contribution!!! :heart: \r\n\r\nHowever, now we are using the Hub to add new datasets, instead of this GitHub repo. \r\n\r\nYou could share this dataset under your Hub organization namespace: [Masakhane NLP](https://huggingface.co/masakhane). This way the dataset will be accessible using:\r\n```python\r\nds = load_dataset(\"masakhane/mafand\")\r\n```\r\n\r\nYou have the procedure documented in our online docs: \r\n- [Create a dataset loading script](https://huggingface.co/docs/datasets/dataset_script)\r\n- [Share](https://huggingface.co/docs/datasets/share)\r\n\r\nMoreover, datasets shared on the Hub no longer need the dummy data files.\r\n\r\nPlease, feel free to ping me if you need any further guidance/support.", "thank you for the comment. I have moved it to the Hub https://huggingface.co/datasets/masakhane/mafand", "Great job, @dadelani!!\r\n\r\nPlease, note that in the README.md file, the YAML tags should be preceded and followed by three dashes `---`, so that they are properly parsed. See, e.g.: https://raw.githubusercontent.com/huggingface/datasets/main/templates/README.md", "Also you could replace the line:\r\n```\r\n# Dataset Card for [Needs More Information]\r\n```\r\nwith\r\n```\r\n# Dataset Card for MAFAND-MT\r\n```", "Great, thank you for the feedback. I have fixed both issues." ]
2022-08-20T15:26:14
2022-08-22T11:00:50
2022-08-22T08:52:23
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4868", "html_url": "https://github.com/huggingface/datasets/pull/4868", "diff_url": "https://github.com/huggingface/datasets/pull/4868.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4868.patch", "merged_at": null }
I'm addding the MAFAND dataset by Masakhane based on the paper/repository below: Paper: https://aclanthology.org/2022.naacl-main.223/ Code: https://github.com/masakhane-io/lafand-mt Please, help merge this Everything works except for creating dummy data file
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4868/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4868/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4867
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4867/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4867/comments
https://api.github.com/repos/huggingface/datasets/issues/4867/events
https://github.com/huggingface/datasets/pull/4867
1,344,982,646
PR_kwDODunzps49fZle
4,867
Complete tags of superglue dataset card
{ "login": "richarddwang", "id": 17963619, "node_id": "MDQ6VXNlcjE3OTYzNjE5", "avatar_url": "https://avatars.githubusercontent.com/u/17963619?v=4", "gravatar_id": "", "url": "https://api.github.com/users/richarddwang", "html_url": "https://github.com/richarddwang", "followers_url": "https://api.github.com/users/richarddwang/followers", "following_url": "https://api.github.com/users/richarddwang/following{/other_user}", "gists_url": "https://api.github.com/users/richarddwang/gists{/gist_id}", "starred_url": "https://api.github.com/users/richarddwang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/richarddwang/subscriptions", "organizations_url": "https://api.github.com/users/richarddwang/orgs", "repos_url": "https://api.github.com/users/richarddwang/repos", "events_url": "https://api.github.com/users/richarddwang/events{/privacy}", "received_events_url": "https://api.github.com/users/richarddwang/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-19T23:44:39
2022-08-22T09:14:03
2022-08-22T08:58:31
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4867", "html_url": "https://github.com/huggingface/datasets/pull/4867", "diff_url": "https://github.com/huggingface/datasets/pull/4867.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4867.patch", "merged_at": "2022-08-22T08:58:31" }
Related to #4479 .
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4867/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4867/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4866
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4866/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4866/comments
https://api.github.com/repos/huggingface/datasets/issues/4866/events
https://github.com/huggingface/datasets/pull/4866
1,344,809,132
PR_kwDODunzps49e1CP
4,866
amend docstring for dunder
{ "login": "schafsam", "id": 37704298, "node_id": "MDQ6VXNlcjM3NzA0Mjk4", "avatar_url": "https://avatars.githubusercontent.com/u/37704298?v=4", "gravatar_id": "", "url": "https://api.github.com/users/schafsam", "html_url": "https://github.com/schafsam", "followers_url": "https://api.github.com/users/schafsam/followers", "following_url": "https://api.github.com/users/schafsam/following{/other_user}", "gists_url": "https://api.github.com/users/schafsam/gists{/gist_id}", "starred_url": "https://api.github.com/users/schafsam/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/schafsam/subscriptions", "organizations_url": "https://api.github.com/users/schafsam/orgs", "repos_url": "https://api.github.com/users/schafsam/repos", "events_url": "https://api.github.com/users/schafsam/events{/privacy}", "received_events_url": "https://api.github.com/users/schafsam/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4866). All of your documentation changes will be reflected on that endpoint." ]
2022-08-19T19:09:15
2022-09-09T16:33:11
null
NONE
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4866", "html_url": "https://github.com/huggingface/datasets/pull/4866", "diff_url": "https://github.com/huggingface/datasets/pull/4866.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4866.patch", "merged_at": null }
display dunder method in docsting with underlines an not bold markdown.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4866/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4866/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4865
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4865/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4865/comments
https://api.github.com/repos/huggingface/datasets/issues/4865/events
https://github.com/huggingface/datasets/issues/4865
1,344,552,626
I_kwDODunzps5QJD6y
4,865
Dataset Viewer issue for MoritzLaurer/multilingual_nli
{ "login": "MoritzLaurer", "id": 41862082, "node_id": "MDQ6VXNlcjQxODYyMDgy", "avatar_url": "https://avatars.githubusercontent.com/u/41862082?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MoritzLaurer", "html_url": "https://github.com/MoritzLaurer", "followers_url": "https://api.github.com/users/MoritzLaurer/followers", "following_url": "https://api.github.com/users/MoritzLaurer/following{/other_user}", "gists_url": "https://api.github.com/users/MoritzLaurer/gists{/gist_id}", "starred_url": "https://api.github.com/users/MoritzLaurer/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MoritzLaurer/subscriptions", "organizations_url": "https://api.github.com/users/MoritzLaurer/orgs", "repos_url": "https://api.github.com/users/MoritzLaurer/repos", "events_url": "https://api.github.com/users/MoritzLaurer/events{/privacy}", "received_events_url": "https://api.github.com/users/MoritzLaurer/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting @MoritzLaurer.\r\n\r\nCurrently, the dataset preview is working properly: https://huggingface.co/datasets/MoritzLaurer/multilingual_nli\r\n\r\nPlease note that when a dataset is modified, it might take some time until the preview is completely updated.\r\n\r\n@severo might it be worth adding a clearer error message, something like \"The preview is updating, please retry later\"?", "Thanks for your response. You are right, its now working well. I had waited for 30 min or so and refreshed several times and thought there was some other error. Yeah, a different error message sounds like a good idea to avoid confusion. ", "I'm closing this issue then.", "> @severo might it be worth adding a clearer error message, something like \"The preview is updating, please retry later\"?\r\n\r\nYes, it's a known issue, and we're about to ship a better version" ]
2022-08-19T14:55:20
2022-08-22T14:47:14
2022-08-22T06:13:20
NONE
null
null
null
### Link _No response_ ### Description I've just uploaded a new dataset to the hub and the viewer does not work for some reason, see here: https://huggingface.co/datasets/MoritzLaurer/multilingual_nli It displays the error: ``` Status code: 400 Exception: Status400Error Message: The dataset does not exist. ``` Weirdly enough the dataviewer works for an earlier version of the same dataset. The only difference is that it is smaller, but I'm not aware of other changes I have made: https://huggingface.co/datasets/MoritzLaurer/multilingual_nli_test Do you know why the dataviewer is not working? ### Owner _No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4865/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4865/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4864
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4864/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4864/comments
https://api.github.com/repos/huggingface/datasets/issues/4864/events
https://github.com/huggingface/datasets/issues/4864
1,344,410,043
I_kwDODunzps5QIhG7
4,864
Allow pathlib PoxisPath in Dataset.read_json
{ "login": "cccntu", "id": 31893406, "node_id": "MDQ6VXNlcjMxODkzNDA2", "avatar_url": "https://avatars.githubusercontent.com/u/31893406?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cccntu", "html_url": "https://github.com/cccntu", "followers_url": "https://api.github.com/users/cccntu/followers", "following_url": "https://api.github.com/users/cccntu/following{/other_user}", "gists_url": "https://api.github.com/users/cccntu/gists{/gist_id}", "starred_url": "https://api.github.com/users/cccntu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cccntu/subscriptions", "organizations_url": "https://api.github.com/users/cccntu/orgs", "repos_url": "https://api.github.com/users/cccntu/repos", "events_url": "https://api.github.com/users/cccntu/events{/privacy}", "received_events_url": "https://api.github.com/users/cccntu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "This same error will occur using `ds = datasets.load_dataset('json', data_files=['test.jsonl'])`", "@cccntu I want to make a quick fix for this, but I am struggling to find where the json dataset builder is. Do you know?", "@vvvm23 I think you mean think:\r\n```python\r\nds = datasets.load_dataset('json', data_files=[Path('test.jsonl')])\r\n```\r\nAnd the place you want to modify is here:\r\n```\r\nutils/file_utils.py:64, in is_remote_url(url_or_filename)\r\n 63 def is_remote_url(url_or_filename: str) -> bool:\r\n---> 64 parsed = urlparse(url_or_filename)\r\n 65 return parsed.scheme in (\"http\", \"https\", \"s3\", \"gs\", \"hdfs\", \"ftp\")\r\n```\r\n\r\nProbably just need to check first if `url_or_filename` is [PathLike](https://docs.python.org/3/library/os.html#os.PathLike) and return False early.\r\n\r\nBtw, I tried installing from main, and ran my code above and got a different error. Probably because the API have changed.\r\n`AttributeError: module 'datasets' has no attribute 'read_json'`\r\n", "> @vvvm23 I think you mean think:\r\n\r\nYou are correct, thanks!\r\n\r\n> Probably just need to check first if url_or_filename is [PathLike](https://docs.python.org/3/library/os.html#os.PathLike) and return False early.\r\n\r\nIs PathLike sufficient, or should I check the file exists here? Or is that handled later?", "I think here we just want to avoid passing Path to urlparse. A simpler solution is to add a str() call and convert the input to string before passing to the next step. No need to check anything.", "Above PR should do your first suggestion. Hope that works for you, as I am going on holiday and won't be able to change much :wink: " ]
2022-08-19T12:59:17
2023-03-12T11:25:49
null
CONTRIBUTOR
null
null
null
**Is your feature request related to a problem? Please describe.** ``` from pathlib import Path from datasets import Dataset ds = Dataset.read_json(Path('data.json')) ``` causes an error ``` AttributeError: 'PosixPath' object has no attribute 'decode' ``` **Describe the solution you'd like** It should be able to accept PosixPath and read the json from inside.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4864/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4864/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4863
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4863/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4863/comments
https://api.github.com/repos/huggingface/datasets/issues/4863/events
https://github.com/huggingface/datasets/issues/4863
1,343,737,668
I_kwDODunzps5QF89E
4,863
TFDS wiki_dialog dataset to Huggingface dataset
{ "login": "djaym7", "id": 12378820, "node_id": "MDQ6VXNlcjEyMzc4ODIw", "avatar_url": "https://avatars.githubusercontent.com/u/12378820?v=4", "gravatar_id": "", "url": "https://api.github.com/users/djaym7", "html_url": "https://github.com/djaym7", "followers_url": "https://api.github.com/users/djaym7/followers", "following_url": "https://api.github.com/users/djaym7/following{/other_user}", "gists_url": "https://api.github.com/users/djaym7/gists{/gist_id}", "starred_url": "https://api.github.com/users/djaym7/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/djaym7/subscriptions", "organizations_url": "https://api.github.com/users/djaym7/orgs", "repos_url": "https://api.github.com/users/djaym7/repos", "events_url": "https://api.github.com/users/djaym7/events{/privacy}", "received_events_url": "https://api.github.com/users/djaym7/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
[ "@albertvillanova any help ? The linked dataset is in beam format which is similar to wikipedia dataset in huggingface that you scripted..", "Nvm, I was able to port it to huggingface datasets, will upload to the hub soon", "https://huggingface.co/datasets/djaym7/wiki_dialog", "Thanks for the addition, @djaym7." ]
2022-08-18T23:06:30
2022-08-22T09:41:45
2022-08-22T05:18:53
NONE
null
null
null
## Adding a Dataset - **Name:** *Wiki_dialog* - **Description: https://github.com/google-research/dialog-inpainting#:~:text=JSON%20object%2C%20for-,example,-%3A - **Paper: https://arxiv.org/abs/2205.09073 - **Data: https://github.com/google-research/dialog-inpainting - **Motivation:** *Research and Development on biggest corpus of dialog data* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/main/ADD_NEW_DATASET.md).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4863/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4863/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4862
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4862/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4862/comments
https://api.github.com/repos/huggingface/datasets/issues/4862/events
https://github.com/huggingface/datasets/issues/4862
1,343,464,699
I_kwDODunzps5QE6T7
4,862
Got "AttributeError: 'xPath' object has no attribute 'read'" when loading an excel dataset with my own code
{ "login": "yana-xuyan", "id": 38536635, "node_id": "MDQ6VXNlcjM4NTM2NjM1", "avatar_url": "https://avatars.githubusercontent.com/u/38536635?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yana-xuyan", "html_url": "https://github.com/yana-xuyan", "followers_url": "https://api.github.com/users/yana-xuyan/followers", "following_url": "https://api.github.com/users/yana-xuyan/following{/other_user}", "gists_url": "https://api.github.com/users/yana-xuyan/gists{/gist_id}", "starred_url": "https://api.github.com/users/yana-xuyan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yana-xuyan/subscriptions", "organizations_url": "https://api.github.com/users/yana-xuyan/orgs", "repos_url": "https://api.github.com/users/yana-xuyan/repos", "events_url": "https://api.github.com/users/yana-xuyan/events{/privacy}", "received_events_url": "https://api.github.com/users/yana-xuyan/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "What's more, the downloaded data is actually a folder instead of an excel file.", "Hi hi, instead of using `download_and_extract` function, I only use `download` function: `base_dir = Path(dl_manager.download(urls))`. It turns out that the code works for `datasets==2.2.2`, however, it doesn't work with `datasets==2.4.0`. ", "Hi @yana-xuyan, thanks for reporting.\r\n\r\nIndeed you already found the answer: an Excel file should be just downloaded and not downloaded-and-extracted.\r\n\r\nThe reason why is that if you call also extract, our library will try to infer the compression format (and extract it). And Excel files are viewed as ZIP files and extracted as so (into a directory). This is because the Office Open XML is indeed a zipped file under the hood): https://en.wikipedia.org/wiki/Office_Open_XML\r\n> Office Open XML (also informally known as OOXML) is a **zipped**, XML-based file format\r\n```python\r\nimport zipfile\r\n\r\nzipfile.is_zipfile(\"filename.xlsx\")\r\n```\r\nreturns `True`.", "Hi @albertvillanova, thank you for your reply! Do you have any clue on why the same error still exists with `datasets==2.4.0` even after I don't extract the downloaded file? FYI, if I downgrade to `datasets==2.2.2`, the code works fine.", "I guess this has to do with the cache: you should remove the previously-wrongly generated directory from the cache; otherwise `datasets` tries to re-use it." ]
2022-08-18T18:36:14
2022-08-31T09:25:08
2022-08-31T09:25:08
NONE
null
null
null
## Describe the bug A clear and concise description of what the bug is. ## Steps to reproduce the bug ```python # Sample code to reproduce the bug # The dataset function is as follows: from pathlib import Path from typing import Dict, List, Tuple import datasets import pandas as pd _CITATION = """\ """ _DATASETNAME = "jadi_ide" _DESCRIPTION = """\ """ _HOMEPAGE = "" _LICENSE = "Unknown" _URLS = { _DATASETNAME: "https://github.com/fathanick/Javanese-Dialect-Identification-from-Twitter-Data/raw/main/Update 16K_Dataset.xlsx", } _SOURCE_VERSION = "1.0.0" class JaDi_Ide(datasets.GeneratorBasedBuilder): SOURCE_VERSION = datasets.Version(_SOURCE_VERSION) BUILDER_CONFIGS = [ NusantaraConfig( name="jadi_ide_source", version=SOURCE_VERSION, description="JaDi-Ide source schema", schema="source", subset_id="jadi_ide", ), ] DEFAULT_CONFIG_NAME = "source" def _info(self) -> datasets.DatasetInfo: if self.config.schema == "source": features = datasets.Features( { "id": datasets.Value("string"), "text": datasets.Value("string"), "label": datasets.Value("string") } ) return datasets.DatasetInfo( description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: """Returns SplitGenerators.""" # Dataset does not have predetermined split, putting all as TRAIN urls = _URLS[_DATASETNAME] base_dir = Path(dl_manager.download_and_extract(urls)) data_files = {"train": base_dir} return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={ "filepath": data_files["train"], "split": "train", }, ), ] def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]: """Yields examples as (key, example) tuples.""" df = pd.read_excel(filepath, engine='openpyxl') df.columns = ["id", "text", "label"] if self.config.schema == "source": for row in df.itertuples(): ex = { "id": str(row.id), "text": row.text, "label": row.label, } yield row.id, ex ``` ## Expected results Expecting to load the dataset smoothly. 
## Actual results File "/home/xuyan/anaconda3/lib/python3.7/site-packages/datasets/load.py", line 1751, in load_dataset use_auth_token=use_auth_token, File "/home/xuyan/anaconda3/lib/python3.7/site-packages/datasets/builder.py", line 705, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/home/xuyan/anaconda3/lib/python3.7/site-packages/datasets/builder.py", line 1227, in _download_and_prepare super()._download_and_prepare(dl_manager, verify_infos, check_duplicate_keys=verify_infos) File "/home/xuyan/anaconda3/lib/python3.7/site-packages/datasets/builder.py", line 793, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/xuyan/anaconda3/lib/python3.7/site-packages/datasets/builder.py", line 1216, in _prepare_split desc=f"Generating {split_info.name} split", File "/home/xuyan/anaconda3/lib/python3.7/site-packages/tqdm/std.py", line 1195, in __iter__ for obj in iterable: File "/home/xuyan/.cache/huggingface/modules/datasets_modules/datasets/jadi_ide/7a539f2b6f726defea8fbe36ceda17bae66c370f6d6c418e3a08d760ebef7519/jadi_ide.py", line 107, in _generate_examples df = pd.read_excel(filepath, engine='openpyxl') File "/home/xuyan/anaconda3/lib/python3.7/site-packages/datasets/download/streaming_download_manager.py", line 701, in xpandas_read_excel return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs) AttributeError: 'xPath' object has no attribute 'read' ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.4.0 - Platform: Linux-4.15.0-142-generic-x86_64-with-debian-stretch-sid - Python version: 3.7.4 - PyArrow version: 9.0.0 - Pandas version: 0.25.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4862/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4862/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4861
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4861/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4861/comments
https://api.github.com/repos/huggingface/datasets/issues/4861/events
https://github.com/huggingface/datasets/issues/4861
1,343,260,220
I_kwDODunzps5QEIY8
4,861
Using disk for memory with the method `from_dict`
{ "login": "HugoLaurencon", "id": 44556846, "node_id": "MDQ6VXNlcjQ0NTU2ODQ2", "avatar_url": "https://avatars.githubusercontent.com/u/44556846?v=4", "gravatar_id": "", "url": "https://api.github.com/users/HugoLaurencon", "html_url": "https://github.com/HugoLaurencon", "followers_url": "https://api.github.com/users/HugoLaurencon/followers", "following_url": "https://api.github.com/users/HugoLaurencon/following{/other_user}", "gists_url": "https://api.github.com/users/HugoLaurencon/gists{/gist_id}", "starred_url": "https://api.github.com/users/HugoLaurencon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/HugoLaurencon/subscriptions", "organizations_url": "https://api.github.com/users/HugoLaurencon/orgs", "repos_url": "https://api.github.com/users/HugoLaurencon/repos", "events_url": "https://api.github.com/users/HugoLaurencon/events{/privacy}", "received_events_url": "https://api.github.com/users/HugoLaurencon/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "This issue was also causing an OOM in @nateraw 's workflow and shows again that behavior is confusing - we should definitely switch to using the disk IMO" ]
2022-08-18T15:18:18
2023-01-26T18:36:28
null
MEMBER
null
null
null
**Is your feature request related to a problem? Please describe.** I start with an empty dataset. In a loop, at each iteration, I create a new dataset with the method `from_dict` (based on some data I load) and I concatenate this new dataset with the one at the previous iteration. After some iterations, I have an OOM error. **Describe the solution you'd like** The method `from_dict` loads the data in RAM. It could be good to add an option to use the disk instead. **Describe alternatives you've considered** To solve the problem, I have to do an intermediate step where I save the new datasets at each iteration with `save_to_disk`. Once it's done, I open them all and concatenate them.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4861/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4861/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4860
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4860/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4860/comments
https://api.github.com/repos/huggingface/datasets/issues/4860/events
https://github.com/huggingface/datasets/pull/4860
1,342,311,540
PR_kwDODunzps49WjEu
4,860
Add collection3 dataset
{ "login": "pefimov", "id": 16446994, "node_id": "MDQ6VXNlcjE2NDQ2OTk0", "avatar_url": "https://avatars.githubusercontent.com/u/16446994?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pefimov", "html_url": "https://github.com/pefimov", "followers_url": "https://api.github.com/users/pefimov/followers", "following_url": "https://api.github.com/users/pefimov/following{/other_user}", "gists_url": "https://api.github.com/users/pefimov/gists{/gist_id}", "starred_url": "https://api.github.com/users/pefimov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pefimov/subscriptions", "organizations_url": "https://api.github.com/users/pefimov/orgs", "repos_url": "https://api.github.com/users/pefimov/repos", "events_url": "https://api.github.com/users/pefimov/events{/privacy}", "received_events_url": "https://api.github.com/users/pefimov/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892913, "node_id": "MDU6TGFiZWwxOTM1ODkyOTEz", "url": "https://api.github.com/repos/huggingface/datasets/labels/wontfix", "name": "wontfix", "color": "ffffff", "default": true, "description": "This will not be worked on" } ]
closed
false
null
[]
null
[ "Hi @pefimov. Thanks for you awesome work on this dataset contribution.\r\n\r\nHowever, now we are using the Hub to add new datasets, instead of this GitHub repo. \r\n\r\nYou could share this dataset under the appropriate Hub organization namespace. This way the dataset will be accessible using:\r\n```python\r\nds = load_dataset(\"<org_namespace>/collection3\")\r\n```\r\n\r\nYou have the procedure documented in our online docs: \r\n- [Create a dataset loading script](https://huggingface.co/docs/datasets/dataset_script)\r\n- [Share](https://huggingface.co/docs/datasets/share)\r\n\r\nMoreover, datasets shared on the Hub no longer need the dummy data files.\r\n\r\nPlease, feel free to ping me if you need any further guidance/support. ", "> However, now we are using the Hub to add new datasets, instead of this GitHub repo.\r\n> \r\n> You could share this dataset under the appropriate Hub organization namespace. This way the dataset will be accessible using:\r\n> \r\n> ```python\r\n> ds = load_dataset(\"<org_namespace>/collection3\")\r\n> ```\r\n> \r\nHi @albertvillanova . Thank you for your response.\r\n\r\nI thought that Collection3 is large and important dataset in Russian presented in 2016 but not represented in huggingface.\r\n\r\nAlso I am not related to authors or organisation of dataset", "The current policy of sharing datasets on the Hub instead of in this GitHub repo has no relation with the importance of the dataset: https://huggingface.co/docs/datasets/share#datasets-on-github-legacy \r\n> The distinction between a Hub dataset and a dataset from GitHub only comes from the legacy sharing workflow. It does not involve any ranking, decisioning, or opinion regarding the contents of the dataset itself.\r\n\r\nIt is not required to be an author/owner (or belong to the organization that is owner) of the dataset in order to share it on the Hub (as it was not the case when sharing them on this GitHub repo). \r\n\r\nIt is recommended to share it under an organization namespace that makes sense though. For this specific dataset, do you know of a clear organization under which it could be shared on the Hub? Maybe \"labinform\", or \"Information Research Laboratory\" or \"Lomonosov Moscow State University\"?\r\n\r\nIn cases like this, where the org is not evident, one possibility could be to contact the dataset owners/creators and ask them. According the publication paper, the authors are:\r\n- V.A. Mozharova\r\n- N.V. Loukachevitch\r\n\r\nI think maybe it would be worth contacting them.", "@pefimov I have contacted the authors (and put you in CC).", "Reply from the authors:\r\n> It is better to use name: Research Computing Center of Lomonosov Moscow State University (short name RCC-MSU)\r\n> https://rcc.msu.ru/en", "I have created the corresponding org namespace and dataset empty repository: https://huggingface.co/datasets/RCC-MSU/collection3\r\n\r\n@pefimov feel free to open a PR on the Hub if you are willing to do so: \r\n- Go to the *Community* tab on the repo: https://huggingface.co/datasets/RCC-MSU/collection3/discussions\r\n- And click: *New pull request* button\r\n\r\nDocs: [Pull requests and Discussions](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) on the Hub", "Thanks" ]
2022-08-17T21:31:42
2022-08-23T20:02:45
2022-08-22T09:08:59
NONE
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4860", "html_url": "https://github.com/huggingface/datasets/pull/4860", "diff_url": "https://github.com/huggingface/datasets/pull/4860.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4860.patch", "merged_at": null }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4860/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4860/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4859
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4859/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4859/comments
https://api.github.com/repos/huggingface/datasets/issues/4859/events
https://github.com/huggingface/datasets/issues/4859
1,342,231,016
I_kwDODunzps5QANHo
4,859
can't install using conda on Windows 10
{ "login": "xoffey", "id": 22627691, "node_id": "MDQ6VXNlcjIyNjI3Njkx", "avatar_url": "https://avatars.githubusercontent.com/u/22627691?v=4", "gravatar_id": "", "url": "https://api.github.com/users/xoffey", "html_url": "https://github.com/xoffey", "followers_url": "https://api.github.com/users/xoffey/followers", "following_url": "https://api.github.com/users/xoffey/following{/other_user}", "gists_url": "https://api.github.com/users/xoffey/gists{/gist_id}", "starred_url": "https://api.github.com/users/xoffey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xoffey/subscriptions", "organizations_url": "https://api.github.com/users/xoffey/orgs", "repos_url": "https://api.github.com/users/xoffey/repos", "events_url": "https://api.github.com/users/xoffey/events{/privacy}", "received_events_url": "https://api.github.com/users/xoffey/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[]
2022-08-17T19:57:37
2022-08-17T19:57:37
null
NONE
null
null
null
## Describe the bug I wanted to install using conda or Anaconda navigator. That didn't work, so I had to install using pip. ## Steps to reproduce the bug conda install -c huggingface -c conda-forge datasets ## Expected results Should have indicated successful installation. ## Actual results Solving environment: failed with initial frozen solve. Retrying with flexible solve. Solving environment: failed with repodata from current_repodata.json, will retry with next repodata source. ... took forever, so I cancelled it with ctrl-c ## Environment info - `datasets` version: 2.4.0 # after installing with pip - Platform: Windows-10-10.0.19044-SP0 - Python version: 3.9.12 - PyArrow version: 9.0.0 - Pandas version: 1.4.2 - conda version: 4.13.0 conda info active environment : base active env location : G:\anaconda2022 shell level : 1 user config file : C:\Users\michael\.condarc populated config files : C:\Users\michael\.condarc conda version : 4.13.0 conda-build version : 3.21.8 python version : 3.9.12.final.0 virtual packages : __cuda=11.1=0 __win=0=0 __archspec=1=x86_64 base environment : G:\anaconda2022 (writable) conda av data dir : G:\anaconda2022\etc\conda conda av metadata url : None channel URLs : https://conda.anaconda.org/pytorch/win-64 https://conda.anaconda.org/pytorch/noarch https://conda.anaconda.org/huggingface/win-64 https://conda.anaconda.org/huggingface/noarch https://conda.anaconda.org/conda-forge/win-64 https://conda.anaconda.org/conda-forge/noarch https://conda.anaconda.org/anaconda-fusion/win-64 https://conda.anaconda.org/anaconda-fusion/noarch https://repo.anaconda.com/pkgs/main/win-64 https://repo.anaconda.com/pkgs/main/noarch https://repo.anaconda.com/pkgs/r/win-64 https://repo.anaconda.com/pkgs/r/noarch https://repo.anaconda.com/pkgs/msys2/win-64 https://repo.anaconda.com/pkgs/msys2/noarch package cache : G:\anaconda2022\pkgs C:\Users\michael\.conda\pkgs C:\Users\michael\AppData\Local\conda\conda\pkgs envs directories : G:\anaconda2022\envs C:\Users\michael\.conda\envs C:\Users\michael\AppData\Local\conda\conda\envs platform : win-64 user-agent : conda/4.13.0 requests/2.27.1 CPython/3.9.12 Windows/10 Windows/10.0.19044 administrator : False netrc file : None offline mode : False
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4859/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4859/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4858
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4858/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4858/comments
https://api.github.com/repos/huggingface/datasets/issues/4858/events
https://github.com/huggingface/datasets/issues/4858
1,340,859,853
I_kwDODunzps5P6-XN
4,858
map() function removes columns when input_columns is not None
{ "login": "pramodith", "id": 16939722, "node_id": "MDQ6VXNlcjE2OTM5NzIy", "avatar_url": "https://avatars.githubusercontent.com/u/16939722?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pramodith", "html_url": "https://github.com/pramodith", "followers_url": "https://api.github.com/users/pramodith/followers", "following_url": "https://api.github.com/users/pramodith/following{/other_user}", "gists_url": "https://api.github.com/users/pramodith/gists{/gist_id}", "starred_url": "https://api.github.com/users/pramodith/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pramodith/subscriptions", "organizations_url": "https://api.github.com/users/pramodith/orgs", "repos_url": "https://api.github.com/users/pramodith/repos", "events_url": "https://api.github.com/users/pramodith/events{/privacy}", "received_events_url": "https://api.github.com/users/pramodith/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi! Thanks for reporting! This looks like a bug. I've just opened a PR with the fix.", "Awesome! Thank you. I'll close the issue once the PR gets merged. :-)", "I guess we should reopen after the revert by:\r\n- #5006" ]
2022-08-16T20:42:30
2022-09-22T13:55:24
2022-09-22T13:55:24
NONE
null
null
null
## Describe the bug The map function, removes features from the dataset that are not present in the _input_columns_ list of columns, despite the columns being removed not mentioned in the _remove_columns_ argument. ## Steps to reproduce the bug ```python from datasets import Dataset ds = Dataset.from_dict({"a" : [1,2,3],"b" : [0,1,0], "c" : [2,4,5]}) def double(x,y): x = x*2 y = y*2 return {"d" : x, "e" : y} ds.map(double, input_columns=["a","c"]) ``` ## Expected results ``` Dataset({ features: ['a', 'b', 'c', 'd', 'e'], num_rows: 3 }) ``` ## Actual results ``` Dataset({ features: ['a', 'c', 'd', 'e'], num_rows: 3 }) ``` In this specific example feature **b** should not be removed. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.4.0 - Platform: linux (colab) - Python version: 3.7.13 - PyArrow version: 6.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4858/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4858/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4857
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4857/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4857/comments
https://api.github.com/repos/huggingface/datasets/issues/4857/events
https://github.com/huggingface/datasets/issues/4857
1,340,397,153
I_kwDODunzps5P5NZh
4,857
No preprocessed wikipedia is working on huggingface/datasets
{ "login": "aninrusimha", "id": 30733039, "node_id": "MDQ6VXNlcjMwNzMzMDM5", "avatar_url": "https://avatars.githubusercontent.com/u/30733039?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aninrusimha", "html_url": "https://github.com/aninrusimha", "followers_url": "https://api.github.com/users/aninrusimha/followers", "following_url": "https://api.github.com/users/aninrusimha/following{/other_user}", "gists_url": "https://api.github.com/users/aninrusimha/gists{/gist_id}", "starred_url": "https://api.github.com/users/aninrusimha/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aninrusimha/subscriptions", "organizations_url": "https://api.github.com/users/aninrusimha/orgs", "repos_url": "https://api.github.com/users/aninrusimha/repos", "events_url": "https://api.github.com/users/aninrusimha/events{/privacy}", "received_events_url": "https://api.github.com/users/aninrusimha/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Thanks for reporting @aninrusimha.\r\n\r\nPlease, note that the preprocessed datasets are still available, as described in the dataset card, e.g.: https://huggingface.co/datasets/wikipedia\r\n```python\r\nds = load_dataset(\"wikipedia\", \"20220301.en\")\r\n``` ", "This is working now, but I was getting an error a few days ago when running an existing script. Unfortunately I did not do a proper bug report, but for some reason I was unable to load the dataset due to a request being made to the wikimedia website. However, its working now. Thanks for the reply!" ]
2022-08-16T13:55:33
2022-08-17T13:35:08
2022-08-17T13:35:08
NONE
null
null
null
## Describe the bug 20220301 wikipedia dump has been deprecated, so now there is no working wikipedia dump on huggingface https://huggingface.co/datasets/wikipedia https://dumps.wikimedia.org/enwiki/
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4857/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4857/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4856
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4856/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4856/comments
https://api.github.com/repos/huggingface/datasets/issues/4856/events
https://github.com/huggingface/datasets/issues/4856
1,339,779,957
I_kwDODunzps5P22t1
4,856
file missing when load_dataset with openwebtext on windows
{ "login": "kingstarcraft", "id": 10361976, "node_id": "MDQ6VXNlcjEwMzYxOTc2", "avatar_url": "https://avatars.githubusercontent.com/u/10361976?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kingstarcraft", "html_url": "https://github.com/kingstarcraft", "followers_url": "https://api.github.com/users/kingstarcraft/followers", "following_url": "https://api.github.com/users/kingstarcraft/following{/other_user}", "gists_url": "https://api.github.com/users/kingstarcraft/gists{/gist_id}", "starred_url": "https://api.github.com/users/kingstarcraft/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kingstarcraft/subscriptions", "organizations_url": "https://api.github.com/users/kingstarcraft/orgs", "repos_url": "https://api.github.com/users/kingstarcraft/repos", "events_url": "https://api.github.com/users/kingstarcraft/events{/privacy}", "received_events_url": "https://api.github.com/users/kingstarcraft/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "I have tried to extract ```0015896-b1054262f7da52a0518521e29c8e352c.txt``` from ```17ecf461bfccd469a1fbc264ccb03731f8606eea7b3e2e8b86e13d18040bf5b3/urlsf_subset00-16_data.xz``` with 7-zip\r\nand put the file into cache_path ```F://huggingface/datasets/downloads/extracted/0901d27f43b7e9ac0577da0d0061c8c632ba0b70ecd1b4bfb21562d9b7486faa```\r\nthere is still raise the same error and I find the file was removed from cache_path after I run the run_mlm.py with ```python run_mlm.py --model_type roberta --tokenizer_name roberta-base --dataset_name openwebtext --per_device_train_batch_size 8 --per_device_eval_batch_size 8 --do_train --do_eval --output_dir F:/model/roberta-base```." ]
2022-08-16T04:04:22
2023-01-04T03:39:12
2023-01-04T03:39:12
NONE
null
null
null
## Describe the bug 0015896-b1054262f7da52a0518521e29c8e352c.txt is missing when I run run_mlm.py with openwebtext. I checked the cache_path and cannot find 0015896-b1054262f7da52a0518521e29c8e352c.txt, but I can find this file in the 17ecf461bfccd469a1fbc264ccb03731f8606eea7b3e2e8b86e13d18040bf5b3/urlsf_subset00-16_data.xz with 7-zip. ## Steps to reproduce the bug ```sh python run_mlm.py --model_type roberta --tokenizer_name roberta-base --dataset_name openwebtext --per_device_train_batch_size 8 --per_device_eval_batch_size 8 --do_train --do_eval --output_dir F:/model/roberta-base ``` or ```python from datasets import load_dataset load_dataset("openwebtext", None, cache_dir=None, use_auth_token=None) ``` ## Expected results Loading is successful. ## Actual results Traceback (most recent call last): File "D:\Python\v3.8.5\lib\site-packages\datasets\builder.py", line 704, in download_and_prepare self._download_and_prepare( File "D:\Python\v3.8.5\lib\site-packages\datasets\builder.py", line 1227, in _download_and_prepare super()._download_and_prepare(dl_manager, verify_infos, check_duplicate_keys=verify_infos) File "D:\Python\v3.8.5\lib\site-packages\datasets\builder.py", line 795, in _download_and_prepare raise OSError( OSError: Cannot find data file. Original error: [Errno 22] Invalid argument: 'F://huggingface/datasets/downloads/extracted/0901d27f43b7e9ac0577da0d0061c8c632ba0b70ecd1b4bfb21562d9b7486faa/0015896-b1054262f7da52a0518521e29c8e352c.txt' ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.4.0 - Platform: windows - Python version: 3.8.5 - PyArrow version: 9.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4856/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4856/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4855
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4855/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4855/comments
https://api.github.com/repos/huggingface/datasets/issues/4855/events
https://github.com/huggingface/datasets/issues/4855
1,339,699,975
I_kwDODunzps5P2jMH
4,855
Dataset Viewer issue for super_glue
{ "login": "wzsxxa", "id": 54366859, "node_id": "MDQ6VXNlcjU0MzY2ODU5", "avatar_url": "https://avatars.githubusercontent.com/u/54366859?v=4", "gravatar_id": "", "url": "https://api.github.com/users/wzsxxa", "html_url": "https://github.com/wzsxxa", "followers_url": "https://api.github.com/users/wzsxxa/followers", "following_url": "https://api.github.com/users/wzsxxa/following{/other_user}", "gists_url": "https://api.github.com/users/wzsxxa/gists{/gist_id}", "starred_url": "https://api.github.com/users/wzsxxa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wzsxxa/subscriptions", "organizations_url": "https://api.github.com/users/wzsxxa/orgs", "repos_url": "https://api.github.com/users/wzsxxa/repos", "events_url": "https://api.github.com/users/wzsxxa/events{/privacy}", "received_events_url": "https://api.github.com/users/wzsxxa/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
null
[]
null
[ "Thanks for reporting @wzsxxa.\r\n\r\nHowever the \"super_glue\" dataset is rendered properly by the Dataset preview: https://huggingface.co/datasets/super_glue" ]
2022-08-16T01:34:56
2022-08-22T10:08:01
2022-08-22T10:07:45
NONE
null
null
null
### Link https://huggingface.co/datasets/super_glue ### Description Can't view the super_glue dataset on the web page. ### Owner _No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4855/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4855/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4853
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4853/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4853/comments
https://api.github.com/repos/huggingface/datasets/issues/4853/events
https://github.com/huggingface/datasets/pull/4853
1,339,456,490
PR_kwDODunzps49NFNL
4,853
Fix bug and checksums in exams dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-15T20:17:57
2022-08-16T06:43:57
2022-08-16T06:29:06
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4853", "html_url": "https://github.com/huggingface/datasets/pull/4853", "diff_url": "https://github.com/huggingface/datasets/pull/4853.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4853.patch", "merged_at": "2022-08-16T06:29:06" }
Fix #4852.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4853/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4853/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4852
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4852/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4852/comments
https://api.github.com/repos/huggingface/datasets/issues/4852/events
https://github.com/huggingface/datasets/issues/4852
1,339,450,991
I_kwDODunzps5P1mZv
4,852
Bug in multilingual_with_para config of exams dataset and checksums error
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @albertvillanova. Unfortunately I still get this error. Is this because the merge has yet to be released? Is there a way to track the release?", "Hi @thesofakillers, yes you are right: the fix will be available after next release (it was planned for today; Monday at the latest).\r\n\r\nIn the meantime, you can use the version of the `exams` on our main branch by passing `revision` to `load_dataset`:\r\n```python\r\nds = load_dataset(\"exams\", revision=\"main\")\r\n```" ]
2022-08-15T20:14:52
2022-09-16T09:50:55
2022-08-16T06:29:07
MEMBER
null
null
null
## Describe the bug There is a bug for "multilingual_with_para" config in exams dataset: ```python ds = load_dataset("./datasets/exams", split="train") ``` raises: ``` KeyError: 'choices' ``` Moreover, there is a NonMatchingChecksumError: ``` NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/with_paragraphs/train_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/with_paragraphs/dev_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/with_paragraphs/test_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/test_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_bg_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_bg_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_hr_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_hr_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_hu_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_hu_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_it_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_it_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_mk_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_mk_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_pl_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_pl_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_pt_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_pt_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_sq_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_sq_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_sr_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_sr_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_tr_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_tr_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_vi_with_para.jsonl.tar.gz', 'https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_vi_with_para.jsonl.tar.gz'] ``` CC: @thesofakillers
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4852/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4852/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4851
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4851/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4851/comments
https://api.github.com/repos/huggingface/datasets/issues/4851/events
https://github.com/huggingface/datasets/pull/4851
1,339,085,917
PR_kwDODunzps49L6ee
4,851
Fix license tag and Source Data section in billsum dataset card
{ "login": "kashif", "id": 8100, "node_id": "MDQ6VXNlcjgxMDA=", "avatar_url": "https://avatars.githubusercontent.com/u/8100?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kashif", "html_url": "https://github.com/kashif", "followers_url": "https://api.github.com/users/kashif/followers", "following_url": "https://api.github.com/users/kashif/following{/other_user}", "gists_url": "https://api.github.com/users/kashif/gists{/gist_id}", "starred_url": "https://api.github.com/users/kashif/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kashif/subscriptions", "organizations_url": "https://api.github.com/users/kashif/orgs", "repos_url": "https://api.github.com/users/kashif/repos", "events_url": "https://api.github.com/users/kashif/events{/privacy}", "received_events_url": "https://api.github.com/users/kashif/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "thanks @albertvillanova done thank you!" ]
2022-08-15T14:37:00
2022-08-22T13:56:24
2022-08-22T13:40:59
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4851", "html_url": "https://github.com/huggingface/datasets/pull/4851", "diff_url": "https://github.com/huggingface/datasets/pull/4851.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4851.patch", "merged_at": "2022-08-22T13:40:59" }
Fixed the data source and license fields
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4851/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4851/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4850
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4850/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4850/comments
https://api.github.com/repos/huggingface/datasets/issues/4850/events
https://github.com/huggingface/datasets/pull/4850
1,338,702,306
PR_kwDODunzps49KnZ8
4,850
Fix test of _get_extraction_protocol for TAR files
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-15T08:37:58
2022-08-15T09:42:56
2022-08-15T09:28:46
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4850", "html_url": "https://github.com/huggingface/datasets/pull/4850", "diff_url": "https://github.com/huggingface/datasets/pull/4850.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4850.patch", "merged_at": "2022-08-15T09:28:46" }
While working in another PR, I discovered an xpass test (a test that is supposed to xfail but nevertheless passes) when testing `_get_extraction_protocol`: https://github.com/huggingface/datasets/runs/7818845285?check_suite_focus=true ``` XPASS tests/test_streaming_download_manager.py::test_streaming_dl_manager_get_extraction_protocol_throws[https://foo.bar/train.tar] ``` This PR: - refactors the test so that it asserts that the expected exceptions are raised instead of xfailing - fixes the test for TAR files: it does not raise an exception, but returns "tar" - fixes some wrongly named tests: swaps `test_streaming_dl_manager_get_extraction_protocol` with `test_streaming_dl_manager_get_extraction_protocol_gg_drive`
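A rough, self-contained sketch of the testing pattern described above; the helper below is a stand-in, not the actual `datasets` function, and the exact exception type in the real tests may differ.

```python
import pytest

def get_extraction_protocol(urlpath: str) -> str:
    # Stand-in for the real helper in the streaming download manager:
    # TAR files return "tar" instead of raising.
    if urlpath.endswith(".tar"):
        return "tar"
    raise NotImplementedError(f"Unsupported compression protocol: {urlpath}")

def test_get_extraction_protocol_tar():
    assert get_extraction_protocol("https://foo.bar/train.tar") == "tar"

def test_get_extraction_protocol_raises():
    # Assert the raise explicitly rather than marking the test as xfail.
    with pytest.raises(NotImplementedError):
        get_extraction_protocol("https://foo.bar/train.unknown")
```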
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4850/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4850/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4849
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4849/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4849/comments
https://api.github.com/repos/huggingface/datasets/issues/4849/events
https://github.com/huggingface/datasets/pull/4849
1,338,273,900
PR_kwDODunzps49JN8d
4,849
1.18.x
{ "login": "Mr-Robot-001", "id": 49282718, "node_id": "MDQ6VXNlcjQ5MjgyNzE4", "avatar_url": "https://avatars.githubusercontent.com/u/49282718?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Mr-Robot-001", "html_url": "https://github.com/Mr-Robot-001", "followers_url": "https://api.github.com/users/Mr-Robot-001/followers", "following_url": "https://api.github.com/users/Mr-Robot-001/following{/other_user}", "gists_url": "https://api.github.com/users/Mr-Robot-001/gists{/gist_id}", "starred_url": "https://api.github.com/users/Mr-Robot-001/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mr-Robot-001/subscriptions", "organizations_url": "https://api.github.com/users/Mr-Robot-001/orgs", "repos_url": "https://api.github.com/users/Mr-Robot-001/repos", "events_url": "https://api.github.com/users/Mr-Robot-001/events{/privacy}", "received_events_url": "https://api.github.com/users/Mr-Robot-001/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2022-08-14T15:09:19
2022-08-14T15:10:02
2022-08-14T15:10:02
NONE
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4849", "html_url": "https://github.com/huggingface/datasets/pull/4849", "diff_url": "https://github.com/huggingface/datasets/pull/4849.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4849.patch", "merged_at": null }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4849/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4849/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4848
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4848/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4848/comments
https://api.github.com/repos/huggingface/datasets/issues/4848/events
https://github.com/huggingface/datasets/pull/4848
1,338,271,833
PR_kwDODunzps49JNj_
4,848
a
{ "login": "Mr-Robot-001", "id": 49282718, "node_id": "MDQ6VXNlcjQ5MjgyNzE4", "avatar_url": "https://avatars.githubusercontent.com/u/49282718?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Mr-Robot-001", "html_url": "https://github.com/Mr-Robot-001", "followers_url": "https://api.github.com/users/Mr-Robot-001/followers", "following_url": "https://api.github.com/users/Mr-Robot-001/following{/other_user}", "gists_url": "https://api.github.com/users/Mr-Robot-001/gists{/gist_id}", "starred_url": "https://api.github.com/users/Mr-Robot-001/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mr-Robot-001/subscriptions", "organizations_url": "https://api.github.com/users/Mr-Robot-001/orgs", "repos_url": "https://api.github.com/users/Mr-Robot-001/repos", "events_url": "https://api.github.com/users/Mr-Robot-001/events{/privacy}", "received_events_url": "https://api.github.com/users/Mr-Robot-001/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2022-08-14T15:01:16
2022-08-14T15:09:59
2022-08-14T15:09:59
NONE
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4848", "html_url": "https://github.com/huggingface/datasets/pull/4848", "diff_url": "https://github.com/huggingface/datasets/pull/4848.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4848.patch", "merged_at": null }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4848/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4848/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4847
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4847/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4847/comments
https://api.github.com/repos/huggingface/datasets/issues/4847/events
https://github.com/huggingface/datasets/pull/4847
1,338,270,636
PR_kwDODunzps49JNWX
4,847
Test win ci
{ "login": "Mr-Robot-001", "id": 49282718, "node_id": "MDQ6VXNlcjQ5MjgyNzE4", "avatar_url": "https://avatars.githubusercontent.com/u/49282718?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Mr-Robot-001", "html_url": "https://github.com/Mr-Robot-001", "followers_url": "https://api.github.com/users/Mr-Robot-001/followers", "following_url": "https://api.github.com/users/Mr-Robot-001/following{/other_user}", "gists_url": "https://api.github.com/users/Mr-Robot-001/gists{/gist_id}", "starred_url": "https://api.github.com/users/Mr-Robot-001/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mr-Robot-001/subscriptions", "organizations_url": "https://api.github.com/users/Mr-Robot-001/orgs", "repos_url": "https://api.github.com/users/Mr-Robot-001/repos", "events_url": "https://api.github.com/users/Mr-Robot-001/events{/privacy}", "received_events_url": "https://api.github.com/users/Mr-Robot-001/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2022-08-14T14:57:00
2022-08-14T14:57:45
2022-08-14T14:57:45
NONE
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4847", "html_url": "https://github.com/huggingface/datasets/pull/4847", "diff_url": "https://github.com/huggingface/datasets/pull/4847.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4847.patch", "merged_at": null }
aa
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4847/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4847/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4846
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4846/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4846/comments
https://api.github.com/repos/huggingface/datasets/issues/4846/events
https://github.com/huggingface/datasets/pull/4846
1,337,979,897
PR_kwDODunzps49IYSC
4,846
Update documentation card of miam dataset
{ "login": "PierreColombo", "id": 22492839, "node_id": "MDQ6VXNlcjIyNDkyODM5", "avatar_url": "https://avatars.githubusercontent.com/u/22492839?v=4", "gravatar_id": "", "url": "https://api.github.com/users/PierreColombo", "html_url": "https://github.com/PierreColombo", "followers_url": "https://api.github.com/users/PierreColombo/followers", "following_url": "https://api.github.com/users/PierreColombo/following{/other_user}", "gists_url": "https://api.github.com/users/PierreColombo/gists{/gist_id}", "starred_url": "https://api.github.com/users/PierreColombo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PierreColombo/subscriptions", "organizations_url": "https://api.github.com/users/PierreColombo/orgs", "repos_url": "https://api.github.com/users/PierreColombo/repos", "events_url": "https://api.github.com/users/PierreColombo/events{/privacy}", "received_events_url": "https://api.github.com/users/PierreColombo/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Ahahah :D not sur how i broke something by updating the README :D ", "Thanks for the fix @PierreColombo. \r\n\r\nOnce a README is modified, our CI runs tests on it, requiring additional quality fixes, so that all READMEs are progressively improved and have some minimal tags/sections/information.\r\n\r\nFor this specific README file, the additional quality requirements of the CI are: https://github.com/huggingface/datasets/runs/7819924428?check_suite_focus=true\r\n```\r\nE The following issues were found for the README at `/home/runner/work/datasets/datasets/datasets/miam/README.md`:\r\nE -\tSection `Additional Information` is missing subsection: `Dataset Curators`.\r\nE -\tSection `Additional Information` is missing subsection: `Contributions`.\r\nE -\t`Additional Information` has an extra subsection: `Benchmark Curators`. Skipping further validation checks for this subsection as expected structure is unknown.\r\n```", "Thanks a lot Albert :)))" ]
2022-08-13T14:38:55
2022-08-17T00:50:04
2022-08-14T10:26:08
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4846", "html_url": "https://github.com/huggingface/datasets/pull/4846", "diff_url": "https://github.com/huggingface/datasets/pull/4846.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4846.patch", "merged_at": "2022-08-14T10:26:08" }
Hi! The paper has been published at EMNLP.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4846/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4846/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4845
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4845/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4845/comments
https://api.github.com/repos/huggingface/datasets/issues/4845/events
https://github.com/huggingface/datasets/pull/4845
1,337,928,283
PR_kwDODunzps49IOjf
4,845
Mark CI tests as xfail if Hub HTTP error
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-13T10:45:11
2022-08-23T04:57:12
2022-08-23T04:42:26
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4845", "html_url": "https://github.com/huggingface/datasets/pull/4845", "diff_url": "https://github.com/huggingface/datasets/pull/4845.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4845.patch", "merged_at": "2022-08-23T04:42:26" }
In order to make testing more robust (and avoid merges to master with red tests), we could mark tests as xfailed (instead of failed) when the Hub raises some temporary HTTP errors. This PR: - marks tests as xfailed only if the Hub raises a 500 error for: - test_upstream_hub - makes pytest report the xfailed/xpassed tests. More tests could also be marked if needed. Examples of CI failures due to temporary Hub HTTP errors: - FAILED tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub_multiple_files - https://github.com/huggingface/datasets/runs/7806855399?check_suite_focus=true `requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: https://hub-ci.huggingface.co/api/datasets/__DUMMY_TRANSFORMERS_USER__/test-16603108028233/commit/main (Request ID: aZeAQ5yLktoGHQYBcJ3zo)` - FAILED tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub_no_token - https://github.com/huggingface/datasets/runs/7840022996?check_suite_focus=true `requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: https://s3.us-east-1.amazonaws.com/lfs-staging.huggingface.co/repos/81/e3/81e3b831fa9bf23190ec041f26ef7ff6d6b71c1a937b8ec1ef1f1f05b508c089/caae596caa179cf45e7c9ac0c6d9a9cb0fe2d305291bfbb2d8b648ae26ed38b6?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=AKIA4N7VTDGOZQA2IKWK%2F20220815%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20220815T144713Z&X-Amz-Expires=900&X-Amz-Signature=5ddddfe8ef2b0601e80ab41c78a4d77d921942b0d8160bcab40ff894095e6823&X-Amz-SignedHeaders=host&x-id=PutObject` - FAILED tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub_private - https://github.com/huggingface/datasets/runs/7835921082?check_suite_focus=true `requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: https://hub-ci.huggingface.co/api/repos/create (Request ID: gL_1I7i2dii9leBhlZen-) - Internal Error - We're working hard to fix that as soon as possible!` - FAILED tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_to_hub_custom_features_image_list - https://github.com/huggingface/datasets/runs/7835920900?check_suite_focus=true - This is not 500, but 404: `requests.exceptions.HTTPError: 404 Client Error: Not Found for url: [https://hub-ci.huggingface.co/datasets/__DUMMY_TRANSFORMERS_USER__/test-16605586458339.git/info/lfs/objects](https://hub-ci.huggingface.co/datasets/__DUMMY_TRANSFORMERS_USER__/test-16605586458339.git/info/lfs/objects/batch)`
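A hedged sketch of the general pattern (not the exact decorator added in this PR): wrap a test so that a transient 5xx error from the Hub turns into an xfail instead of a hard failure.

```python
import functools
import pytest
import requests

def xfail_on_hub_server_error(test_func):
    # Illustrative only: convert 5xx HTTP errors into xfails so transient
    # Hub outages do not turn the CI red.
    @functools.wraps(test_func)
    def wrapper(*args, **kwargs):
        try:
            return test_func(*args, **kwargs)
        except requests.exceptions.HTTPError as err:
            if err.response is not None and err.response.status_code >= 500:
                pytest.xfail(f"Hub returned a server error: {err}")
            raise
    return wrapper
```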
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4845/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4845/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4844
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4844/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4844/comments
https://api.github.com/repos/huggingface/datasets/issues/4844/events
https://github.com/huggingface/datasets/pull/4844
1,337,878,249
PR_kwDODunzps49IFLa
4,844
Add 'val' to VALIDATION_KEYWORDS.
{ "login": "akt42", "id": 98386959, "node_id": "U_kgDOBd1EDw", "avatar_url": "https://avatars.githubusercontent.com/u/98386959?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akt42", "html_url": "https://github.com/akt42", "followers_url": "https://api.github.com/users/akt42/followers", "following_url": "https://api.github.com/users/akt42/following{/other_user}", "gists_url": "https://api.github.com/users/akt42/gists{/gist_id}", "starred_url": "https://api.github.com/users/akt42/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akt42/subscriptions", "organizations_url": "https://api.github.com/users/akt42/orgs", "repos_url": "https://api.github.com/users/akt42/repos", "events_url": "https://api.github.com/users/akt42/events{/privacy}", "received_events_url": "https://api.github.com/users/akt42/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "@mariosasko not sure about how the reviewing process works. Maybe you can have a look because we discussed this elsewhere?", "Hi, thanks! \r\n\r\nLet's add one pattern with `val` to this test before merging: \r\nhttps://github.com/huggingface/datasets/blob/b88a656cf94c4ad972154371c83c1af759fde522/tests/test_data_files.py#L598", "_The documentation is not available anymore as the PR was closed or merged._", "@akt42 note that there is some info about splits keywords in the docs: https://huggingface.co/docs/datasets/main/en/repository_structure#split-names-keywords. I agree it's not clear that it applies not only to filenames, but to directories as well.\r\n\r\nI think \"val\" should be now added to the documentation source file here: https://github.com/huggingface/datasets/blob/main/docs/source/repository_structure.mdx?plain=1#L98", "@polinaeterna Thanks for notifying us that there is a list of supported keywords\r\n\r\nI've added \"val\" to that list and a test." ]
2022-08-13T06:49:41
2022-08-30T10:17:35
2022-08-30T10:14:54
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4844", "html_url": "https://github.com/huggingface/datasets/pull/4844", "diff_url": "https://github.com/huggingface/datasets/pull/4844.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4844.patch", "merged_at": "2022-08-30T10:14:54" }
This PR fixes #4839 by adding the word `"val"` to the `VALIDATION_KEYWORDS` list so that the `load_dataset()` method with `imagefolder` (and probably some other builders as well) also reads folders named `"val"`. I think the supported keywords should be mentioned in the documentation as well, but I couldn't think of a proper place to add that.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4844/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4844/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4843
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4843/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4843/comments
https://api.github.com/repos/huggingface/datasets/issues/4843/events
https://github.com/huggingface/datasets/pull/4843
1,337,668,699
PR_kwDODunzps49HaWT
4,843
Fix typo in streaming docs
{ "login": "flozi00", "id": 47894090, "node_id": "MDQ6VXNlcjQ3ODk0MDkw", "avatar_url": "https://avatars.githubusercontent.com/u/47894090?v=4", "gravatar_id": "", "url": "https://api.github.com/users/flozi00", "html_url": "https://github.com/flozi00", "followers_url": "https://api.github.com/users/flozi00/followers", "following_url": "https://api.github.com/users/flozi00/following{/other_user}", "gists_url": "https://api.github.com/users/flozi00/gists{/gist_id}", "starred_url": "https://api.github.com/users/flozi00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/flozi00/subscriptions", "organizations_url": "https://api.github.com/users/flozi00/orgs", "repos_url": "https://api.github.com/users/flozi00/repos", "events_url": "https://api.github.com/users/flozi00/events{/privacy}", "received_events_url": "https://api.github.com/users/flozi00/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-12T20:18:21
2022-08-14T11:43:30
2022-08-14T11:02:09
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4843", "html_url": "https://github.com/huggingface/datasets/pull/4843", "diff_url": "https://github.com/huggingface/datasets/pull/4843.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4843.patch", "merged_at": "2022-08-14T11:02:09" }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4843/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4843/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4842
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4842/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4842/comments
https://api.github.com/repos/huggingface/datasets/issues/4842/events
https://github.com/huggingface/datasets/pull/4842
1,337,527,764
PR_kwDODunzps49G8CC
4,842
Update stackexchange license
{ "login": "cakiki", "id": 3664563, "node_id": "MDQ6VXNlcjM2NjQ1NjM=", "avatar_url": "https://avatars.githubusercontent.com/u/3664563?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cakiki", "html_url": "https://github.com/cakiki", "followers_url": "https://api.github.com/users/cakiki/followers", "following_url": "https://api.github.com/users/cakiki/following{/other_user}", "gists_url": "https://api.github.com/users/cakiki/gists{/gist_id}", "starred_url": "https://api.github.com/users/cakiki/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cakiki/subscriptions", "organizations_url": "https://api.github.com/users/cakiki/orgs", "repos_url": "https://api.github.com/users/cakiki/repos", "events_url": "https://api.github.com/users/cakiki/events{/privacy}", "received_events_url": "https://api.github.com/users/cakiki/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-12T17:39:06
2022-08-14T10:43:18
2022-08-14T10:28:49
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4842", "html_url": "https://github.com/huggingface/datasets/pull/4842", "diff_url": "https://github.com/huggingface/datasets/pull/4842.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4842.patch", "merged_at": "2022-08-14T10:28:49" }
The correct license of the stackexchange subset of the Pile is `cc-by-sa-4.0`, as can for example be seen here: https://stackoverflow.com/help/licensing
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4842/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4842/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4841
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4841/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4841/comments
https://api.github.com/repos/huggingface/datasets/issues/4841/events
https://github.com/huggingface/datasets/pull/4841
1,337,401,243
PR_kwDODunzps49Gf0I
4,841
Update ted_talks_iwslt license to include ND
{ "login": "cakiki", "id": 3664563, "node_id": "MDQ6VXNlcjM2NjQ1NjM=", "avatar_url": "https://avatars.githubusercontent.com/u/3664563?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cakiki", "html_url": "https://github.com/cakiki", "followers_url": "https://api.github.com/users/cakiki/followers", "following_url": "https://api.github.com/users/cakiki/following{/other_user}", "gists_url": "https://api.github.com/users/cakiki/gists{/gist_id}", "starred_url": "https://api.github.com/users/cakiki/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cakiki/subscriptions", "organizations_url": "https://api.github.com/users/cakiki/orgs", "repos_url": "https://api.github.com/users/cakiki/repos", "events_url": "https://api.github.com/users/cakiki/events{/privacy}", "received_events_url": "https://api.github.com/users/cakiki/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-12T16:14:52
2022-08-14T11:15:22
2022-08-14T11:00:22
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4841", "html_url": "https://github.com/huggingface/datasets/pull/4841", "diff_url": "https://github.com/huggingface/datasets/pull/4841.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4841.patch", "merged_at": "2022-08-14T11:00:22" }
Excerpt from the paper's abstract: "Aside from its cultural and social relevance, this content, which is published under the Creative Commons BY-NC-ND license, also represents a precious language resource for the machine translation research community"
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4841/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4841/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4840
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4840/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4840/comments
https://api.github.com/repos/huggingface/datasets/issues/4840/events
https://github.com/huggingface/datasets/issues/4840
1,337,342,672
I_kwDODunzps5PtjrQ
4,840
Dataset Viewer issue for darragh/demo_data_raw3
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "do you have an idea of why it can occur @huggingface/datasets? The dataset consists of a single parquet file.", "Thanks for reporting @severo.\r\n\r\nI'm not able to reproduce that error. I get instead:\r\n```\r\nFileNotFoundError: [Errno 2] No such file or directory: 'orix/data/ChiSig/唐合乐-9-3.jpg'\r\n```\r\n\r\nWhich pyarrow version are you using? Mine is 6.0.1. ", "OK, I get now your error when not streaming.", "OK!\r\n\r\nIf it's useful, the pyarrow version is 7.0.0:\r\n\r\nhttps://github.com/huggingface/datasets-server/blob/487c39d87998f8d5a35972f1027d6c8e588e622d/services/worker/poetry.lock#L1537-L1543", "Apparently, there is something weird with that Parquet file: its schema is:\r\n```\r\nimages: extension<arrow.py_extension_type<pyarrow.lib.UnknownExtensionType>>\r\n```\r\n\r\nI have forced a right schema:\r\n```python\r\nfrom datasets import Features, Image, load_dataset\r\n\r\nfeatures = Features({\"images\": Image()})\r\nds = datasets.load_dataset(\"parquet\", split=\"train\", data_files=\"train-00000-of-00001.parquet\", features=features)\r\n```\r\nand then recreated a new Parquet file:\r\n```python\r\nds.to_parquet(\"train.parquet\")\r\n```\r\n\r\nNow this Parquet file has the right schema:\r\n```\r\nimages: struct<bytes: binary, path: string>\r\n child 0, bytes: binary\r\n child 1, path: string\r\n```\r\nand can be loaded normally:\r\n```python\r\nIn [26]: ds = load_dataset(\"parquet\", split=\"train\", data_files=\"dataset.parquet\")\r\nn [27]: ds\r\nOut[27]: \r\nDataset({\r\n features: ['images'],\r\n num_rows: 20\r\n})\r\n```" ]
2022-08-12T15:22:58
2022-09-08T07:55:44
null
CONTRIBUTOR
null
null
null
### Link https://huggingface.co/datasets/darragh/demo_data_raw3 ### Description ``` Exception: ValueError Message: Arrow type extension<arrow.py_extension_type<pyarrow.lib.UnknownExtensionType>> does not have a datasets dtype equivalent. ``` reported by @NielsRogge ### Owner No
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4840/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4840/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4839
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4839/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4839/comments
https://api.github.com/repos/huggingface/datasets/issues/4839/events
https://github.com/huggingface/datasets/issues/4839
1,337,206,377
I_kwDODunzps5PtCZp
4,839
ImageFolder dataset builder does not read the validation data set if it is named as "val"
{ "login": "akt42", "id": 98386959, "node_id": "U_kgDOBd1EDw", "avatar_url": "https://avatars.githubusercontent.com/u/98386959?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akt42", "html_url": "https://github.com/akt42", "followers_url": "https://api.github.com/users/akt42/followers", "following_url": "https://api.github.com/users/akt42/following{/other_user}", "gists_url": "https://api.github.com/users/akt42/gists{/gist_id}", "starred_url": "https://api.github.com/users/akt42/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akt42/subscriptions", "organizations_url": "https://api.github.com/users/akt42/orgs", "repos_url": "https://api.github.com/users/akt42/repos", "events_url": "https://api.github.com/users/akt42/events{/privacy}", "received_events_url": "https://api.github.com/users/akt42/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "akt42", "id": 98386959, "node_id": "U_kgDOBd1EDw", "avatar_url": "https://avatars.githubusercontent.com/u/98386959?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akt42", "html_url": "https://github.com/akt42", "followers_url": "https://api.github.com/users/akt42/followers", "following_url": "https://api.github.com/users/akt42/following{/other_user}", "gists_url": "https://api.github.com/users/akt42/gists{/gist_id}", "starred_url": "https://api.github.com/users/akt42/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akt42/subscriptions", "organizations_url": "https://api.github.com/users/akt42/orgs", "repos_url": "https://api.github.com/users/akt42/repos", "events_url": "https://api.github.com/users/akt42/events{/privacy}", "received_events_url": "https://api.github.com/users/akt42/received_events", "type": "User", "site_admin": false }
[ { "login": "akt42", "id": 98386959, "node_id": "U_kgDOBd1EDw", "avatar_url": "https://avatars.githubusercontent.com/u/98386959?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akt42", "html_url": "https://github.com/akt42", "followers_url": "https://api.github.com/users/akt42/followers", "following_url": "https://api.github.com/users/akt42/following{/other_user}", "gists_url": "https://api.github.com/users/akt42/gists{/gist_id}", "starred_url": "https://api.github.com/users/akt42/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akt42/subscriptions", "organizations_url": "https://api.github.com/users/akt42/orgs", "repos_url": "https://api.github.com/users/akt42/repos", "events_url": "https://api.github.com/users/akt42/events{/privacy}", "received_events_url": "https://api.github.com/users/akt42/received_events", "type": "User", "site_admin": false } ]
null
[ "#take" ]
2022-08-12T13:26:00
2022-08-30T10:14:55
2022-08-30T10:14:55
CONTRIBUTOR
null
null
null
**Is your feature request related to a problem? Please describe.** Currently, the `'imagefolder'` dataset builder in [`load_dataset()`](https://github.com/huggingface/datasets/blob/2.4.0/src/datasets/load.py#L1541) only [supports](https://github.com/huggingface/datasets/blob/6c609a322da994de149b2c938f19439bca99408e/src/datasets/data_files.py#L31) the following names as the validation split directory name: `["validation", "valid", "dev"]`. When the validation directory is named `'val'`, the dataset will not have a validation split. I expected this to be a trivial task but ended up spending a lot of time before realizing that only the above names are supported. Here's a minimal example of `val` not being recognized: ```python import os import numpy as np import cv2 from datasets import load_dataset # creating a dummy data set with the following structure: # ROOT # | -- train # | ---- class_1 # | ---- class_2 # | -- val # | ---- class_1 # | ---- class_2 ROOT = "data" for which in ["train", "val"]: for class_name in ["class_1", "class_2"]: dir_name = os.path.join(ROOT, which, class_name) if not os.path.exists(dir_name): os.makedirs(dir_name) for i in range(10): cv2.imwrite( os.path.join(dir_name, f"{i}.png"), np.random.random((224, 224)) ) # trying to create a data set dataset = load_dataset( "imagefolder", data_dir=ROOT ) >> dataset DatasetDict({ train: Dataset({ features: ['image', 'label'], num_rows: 20 }) }) # ^ note how the dataset only has a 'train' subset ``` **Describe the solution you'd like** The suggestion is to include `"val"` in [that list](https://github.com/huggingface/datasets/blob/6c609a322da994de149b2c938f19439bca99408e/src/datasets/data_files.py#L31) as that's a commonly used name for the validation directory. Also, in the documentation, explicitly mention that only such directory names are supported as train/val/test directories to avoid confusion. **Describe alternatives you've considered** In the documentation, explicitly mention that only such directory names are supported as train/val/test directories without adding `val` to the above list. **Additional context** A question asked in the forum: [Loading an imagenet-style image dataset with train/val directories](https://discuss.huggingface.co/t/loading-an-imagenet-style-image-dataset-with-train-val-directories/21554)
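Until directory-name detection covers `val`, one possible workaround (a sketch, assuming the directory layout from the example above) is to map the folders to splits explicitly with `data_files` glob patterns:

```python
# Sketch: point each split at its folder explicitly instead of relying on
# the builder to infer splits from directory names.
from datasets import load_dataset

dataset = load_dataset(
    "imagefolder",
    data_files={
        "train": "data/train/**",
        "validation": "data/val/**",
    },
)
# dataset now has both a 'train' and a 'validation' split
```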
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4839/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4839/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4838
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4838/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4838/comments
https://api.github.com/repos/huggingface/datasets/issues/4838/events
https://github.com/huggingface/datasets/pull/4838
1,337,194,918
PR_kwDODunzps49F08R
4,838
Fix documentation card of adv_glue dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "The failing test has nothing to do with this PR:\r\n```\r\nFAILED tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub_multiple_files\r\n```" ]
2022-08-12T13:15:26
2022-08-15T10:17:14
2022-08-15T10:02:11
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4838", "html_url": "https://github.com/huggingface/datasets/pull/4838", "diff_url": "https://github.com/huggingface/datasets/pull/4838.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4838.patch", "merged_at": "2022-08-15T10:02:11" }
Fix documentation card of adv_glue dataset.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4838/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4838/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4837
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4837/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4837/comments
https://api.github.com/repos/huggingface/datasets/issues/4837/events
https://github.com/huggingface/datasets/pull/4837
1,337,079,723
PR_kwDODunzps49Fb6l
4,837
Add support for CSV metadata files to ImageFolder
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Cool thanks ! Maybe let's include this change after the refactoring from FolderBasedBuilder in #3963 to avoid dealing with too many unpleasant conflicts ?", "@lhoestq I resolved the conflicts (AudioFolder also supports CSV metadata now). Let me know what you think.\r\n", "@lhoestq Thanks for the suggestion! Indeed it makes more sense to use CSV as the default format in the folder-based builders." ]
2022-08-12T11:19:18
2022-08-31T12:01:27
2022-08-31T11:59:07
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4837", "html_url": "https://github.com/huggingface/datasets/pull/4837", "diff_url": "https://github.com/huggingface/datasets/pull/4837.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4837.patch", "merged_at": "2022-08-31T11:59:07" }
Fix #4814
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4837/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4837/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4836
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4836/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4836/comments
https://api.github.com/repos/huggingface/datasets/issues/4836/events
https://github.com/huggingface/datasets/issues/4836
1,337,067,632
I_kwDODunzps5Psghw
4,836
Is it possible to pass multiple links to a split in load script?
{ "login": "sadrasabouri", "id": 43045767, "node_id": "MDQ6VXNlcjQzMDQ1NzY3", "avatar_url": "https://avatars.githubusercontent.com/u/43045767?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sadrasabouri", "html_url": "https://github.com/sadrasabouri", "followers_url": "https://api.github.com/users/sadrasabouri/followers", "following_url": "https://api.github.com/users/sadrasabouri/following{/other_user}", "gists_url": "https://api.github.com/users/sadrasabouri/gists{/gist_id}", "starred_url": "https://api.github.com/users/sadrasabouri/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sadrasabouri/subscriptions", "organizations_url": "https://api.github.com/users/sadrasabouri/orgs", "repos_url": "https://api.github.com/users/sadrasabouri/repos", "events_url": "https://api.github.com/users/sadrasabouri/events{/privacy}", "received_events_url": "https://api.github.com/users/sadrasabouri/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[]
2022-08-12T11:06:11
2022-08-12T11:06:11
null
NONE
null
null
null
**Is your feature request related to a problem? Please describe.** I wanted to use a Python loading script in Hugging Face datasets that uses different sources of text (it's essentially a compilation of multiple datasets + my own dataset). Based on how `load_dataset` [works](https://huggingface.co/docs/datasets/loading), I assumed I could do something like the following in my loading script: ```python ... _URL = "MY_DATASET_URL/resolve/main/data/" _URLS = { "train": [ "FIRST_URL_TO.txt", _URL + "train-00000-of-00001-676bfebbc8742592.parquet" ] } ... ``` but when loading the dataset it raises the following error: ```python File ~/.local/lib/python3.8/site-packages/datasets/builder.py:704, in DatasetBuilder.download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 702 logger.warning("HF google storage unreachable. Downloading and preparing it from source") 703 if not downloaded_from_gcs: --> 704 self._download_and_prepare( 705 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs ... 668 if isinstance(a, str): 669 # Force-cast str subclasses to str (issue #21127) 670 parts.append(str(a)) TypeError: expected str, bytes or os.PathLike object, not list ``` **Describe the solution you'd like** I believe that since it's possible for `load_dataset` to take a list of URLs instead of just one URL for the `train` split, it should be possible here too. **Describe alternatives you've considered** An alternative solution would be to download all the needed datasets locally and `push_to_hub` them all, but since the datasets I'm talking about are huge, that's not an option for me. **Additional context** I think loading `text` beside the `parquet` is a completely different issue, but I believe I can figure it out by proposing a config for my dataset to load each entry of `_URLS['train']` separately, either by `load_dataset("text", ...` or `load_dataset("parquet", ...`.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4836/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4836/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4835
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4835/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4835/comments
https://api.github.com/repos/huggingface/datasets/issues/4835/events
https://github.com/huggingface/datasets/pull/4835
1,336,994,835
PR_kwDODunzps49FJg9
4,835
Fix documentation card of ethos dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-12T09:51:06
2022-08-12T13:13:55
2022-08-12T12:59:39
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4835", "html_url": "https://github.com/huggingface/datasets/pull/4835", "diff_url": "https://github.com/huggingface/datasets/pull/4835.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4835.patch", "merged_at": "2022-08-12T12:59:39" }
Fix documentation card of ethos dataset.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4835/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4835/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4834
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4834/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4834/comments
https://api.github.com/repos/huggingface/datasets/issues/4834/events
https://github.com/huggingface/datasets/pull/4834
1,336,993,511
PR_kwDODunzps49FJOu
4,834
Fix documentation card of recipe_nlg dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-12T09:49:39
2022-08-12T11:28:18
2022-08-12T11:13:40
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4834", "html_url": "https://github.com/huggingface/datasets/pull/4834", "diff_url": "https://github.com/huggingface/datasets/pull/4834.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4834.patch", "merged_at": "2022-08-12T11:13:40" }
Fix documentation card of recipe_nlg dataset
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4834/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4834/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4833
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4833/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4833/comments
https://api.github.com/repos/huggingface/datasets/issues/4833/events
https://github.com/huggingface/datasets/pull/4833
1,336,946,965
PR_kwDODunzps49E_Nk
4,833
Fix missing tags in dataset cards
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-12T09:04:52
2022-09-22T14:41:23
2022-08-12T09:45:55
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4833", "html_url": "https://github.com/huggingface/datasets/pull/4833", "diff_url": "https://github.com/huggingface/datasets/pull/4833.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4833.patch", "merged_at": "2022-08-12T09:45:55" }
Fix missing tags in dataset cards: - boolq - break_data - definite_pronoun_resolution - emo - kor_nli - pg19 - quartz - sciq - squad_es - wmt14 - wmt15 - wmt16 - wmt17 - wmt18 - wmt19 - wmt_t2t This PR partially fixes the missing tags in dataset cards. Subsequent PRs will follow to complete this task.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4833/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4833/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4832
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4832/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4832/comments
https://api.github.com/repos/huggingface/datasets/issues/4832/events
https://github.com/huggingface/datasets/pull/4832
1,336,727,389
PR_kwDODunzps49EQav
4,832
Fix tags in dataset cards
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "The non-passing tests are caused by other missing information in the dataset cards." ]
2022-08-12T04:11:23
2022-08-12T04:41:55
2022-08-12T04:27:24
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4832", "html_url": "https://github.com/huggingface/datasets/pull/4832", "diff_url": "https://github.com/huggingface/datasets/pull/4832.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4832.patch", "merged_at": "2022-08-12T04:27:24" }
Fix wrong tags in dataset cards.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4832/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4832/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4831
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4831/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4831/comments
https://api.github.com/repos/huggingface/datasets/issues/4831/events
https://github.com/huggingface/datasets/pull/4831
1,336,199,643
PR_kwDODunzps49Cibf
4,831
Add oversampling strategies to interleave datasets
{ "login": "ylacombe", "id": 52246514, "node_id": "MDQ6VXNlcjUyMjQ2NTE0", "avatar_url": "https://avatars.githubusercontent.com/u/52246514?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ylacombe", "html_url": "https://github.com/ylacombe", "followers_url": "https://api.github.com/users/ylacombe/followers", "following_url": "https://api.github.com/users/ylacombe/following{/other_user}", "gists_url": "https://api.github.com/users/ylacombe/gists{/gist_id}", "starred_url": "https://api.github.com/users/ylacombe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ylacombe/subscriptions", "organizations_url": "https://api.github.com/users/ylacombe/orgs", "repos_url": "https://api.github.com/users/ylacombe/repos", "events_url": "https://api.github.com/users/ylacombe/events{/privacy}", "received_events_url": "https://api.github.com/users/ylacombe/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4831). All of your documentation changes will be reflected on that endpoint.", "Hi @lhoestq, \r\nThanks for your review! I've added the requested mention in the documentation and corrected the Error type in `interleave_datasets`. \r\nI've also added test cases in `test_arrow_dataset.py`, which was useful since it allow me to detect an error in the case of an oversampling strategy with no sampling probabilities. \r\nCould you double check this part ? I've commented the code to explain the approach.\r\nThanks!\r\n" ]
2022-08-11T16:24:51
2022-12-04T11:23:54
2022-08-24T16:46:07
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4831", "html_url": "https://github.com/huggingface/datasets/pull/4831", "diff_url": "https://github.com/huggingface/datasets/pull/4831.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4831.patch", "merged_at": "2022-08-24T16:46:07" }
Hello everyone, Here is a proposal to improve the `interleave_datasets` function. Following Issue #3064 and @lhoestq's [comment](https://github.com/huggingface/datasets/issues/3064#issuecomment-1022333385), I propose here code that performs oversampling when interleaving a `Dataset` list. I have encountered this problem myself while trying to implement training on a multilingual dataset following a training strategy similar to that of the [XLSUM paper](https://arxiv.org/pdf/2106.13822.pdf), a multilingual abstractive summarization dataset where the multilingual training dataset is created by sampling from the languages following a smoothing strategy. The main idea is to sample languages that have a low number of samples more frequently than other languages. As in Issue #3064, the current default strategy is an undersampling strategy, which stops as soon as a dataset runs out of samples. The new `all_exhausted` strategy stops building the new dataset as soon as all samples in each dataset have been added at least once. How does it work in practice: - if ``probabilities`` is `None` and the strategy is `all_exhausted`, it simply performs a round robin interleaving that stops when the longest dataset is out of samples. Here the new dataset length will be $maxLengthDataset*nbDataset$. - if ``probabilities`` is not `None` and the strategy is `all_exhausted`, it keeps track of the datasets which were out of samples but continues to add them to the new dataset, and stops as soon as every dataset runs out of samples at least once. - In the other cases, it is supposed to keep the same behaviour as before, except that this time, when probabilities are specified, it really stops AS SOON AS a dataset is out of samples. More on the last sentence: The previous example of `interleave_datasets` was: >>> from datasets import Dataset, interleave_datasets >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12] With my implementation, `dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)` gives: >>> dataset["a"] [10, 0, 11, 1, 2] because `d1` is already out of samples just after `2` is added.
Example of the results of applying the different strategies: >>> from datasets import Dataset, interleave_datasets >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]}) >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 0, 24] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24] **Final note:** I've been using that code for a research project involving a large-scale multilingual dataset. One should be careful when using oversampling to avoid exploding the size of the dataset. For example, if a very large dataset has a low probability of being sampled, the final dataset may be several times the size of that large dataset.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4831/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4831/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4830
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4830/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4830/comments
https://api.github.com/repos/huggingface/datasets/issues/4830/events
https://github.com/huggingface/datasets/pull/4830
1,336,177,937
PR_kwDODunzps49Cdro
4,830
Fix task tags in dataset cards
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "The non-passing tests are caused by other missing information in the dataset cards." ]
2022-08-11T16:06:06
2022-08-11T16:37:27
2022-08-11T16:23:00
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4830", "html_url": "https://github.com/huggingface/datasets/pull/4830", "diff_url": "https://github.com/huggingface/datasets/pull/4830.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4830.patch", "merged_at": "2022-08-11T16:23:00" }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4830/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4830/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4829
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4829/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4829/comments
https://api.github.com/repos/huggingface/datasets/issues/4829/events
https://github.com/huggingface/datasets/issues/4829
1,336,068,068
I_kwDODunzps5Posfk
4,829
Misalignment between card tag validation and docs
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "(Note that the doc is aligned with the hub validation rules, and the \"ground truth\" is the hub validation rules given that they apply to all datasets, not just the canonical ones)" ]
2022-08-11T14:44:45
2022-08-11T14:46:35
null
MEMBER
null
null
null
## Describe the bug As pointed out in another issue: https://github.com/huggingface/datasets/pull/4827#discussion_r943536284 the validation of the dataset card tags is not aligned with its documentation, e.g.: - implementation: `license: List[str]` - docs: `license: Union[str, List[str]]` They should be aligned. CC: @julien-c
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4829/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4829/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4828
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4828/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4828/comments
https://api.github.com/repos/huggingface/datasets/issues/4828/events
https://github.com/huggingface/datasets/pull/4828
1,336,040,168
PR_kwDODunzps49B_vb
4,828
Support PIL Image objects in `add_item`/`add_column`
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4828). All of your documentation changes will be reflected on that endpoint.", "Hey @mariosasko could we please merge this? I'm still getting the original error at #4796 ." ]
2022-08-11T14:25:45
2023-02-23T14:01:47
null
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4828", "html_url": "https://github.com/huggingface/datasets/pull/4828", "diff_url": "https://github.com/huggingface/datasets/pull/4828.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4828.patch", "merged_at": null }
Fix #4796 PS: We should also improve the type inference in `OptimizedTypeSequence` to make it possible to also infer the complex types (only `Image` currently) in nested arrays (e.g. `[[pil_image], [pil_image, pil_image]]` or `[{"img": pil_image}]`), but I plan to address this in a separate PR.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4828/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4828/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4827
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4827/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4827/comments
https://api.github.com/repos/huggingface/datasets/issues/4827/events
https://github.com/huggingface/datasets/pull/4827
1,335,994,312
PR_kwDODunzps49B1zi
4,827
Add license metadata to pg19
{ "login": "julien-c", "id": 326577, "node_id": "MDQ6VXNlcjMyNjU3Nw==", "avatar_url": "https://avatars.githubusercontent.com/u/326577?v=4", "gravatar_id": "", "url": "https://api.github.com/users/julien-c", "html_url": "https://github.com/julien-c", "followers_url": "https://api.github.com/users/julien-c/followers", "following_url": "https://api.github.com/users/julien-c/following{/other_user}", "gists_url": "https://api.github.com/users/julien-c/gists{/gist_id}", "starred_url": "https://api.github.com/users/julien-c/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/julien-c/subscriptions", "organizations_url": "https://api.github.com/users/julien-c/orgs", "repos_url": "https://api.github.com/users/julien-c/repos", "events_url": "https://api.github.com/users/julien-c/events{/privacy}", "received_events_url": "https://api.github.com/users/julien-c/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-11T13:52:20
2022-08-11T15:01:03
2022-08-11T14:46:38
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4827", "html_url": "https://github.com/huggingface/datasets/pull/4827", "diff_url": "https://github.com/huggingface/datasets/pull/4827.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4827.patch", "merged_at": "2022-08-11T14:46:38" }
As reported over email by Roy Rijkers
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4827/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4827/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4826
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4826/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4826/comments
https://api.github.com/repos/huggingface/datasets/issues/4826/events
https://github.com/huggingface/datasets/pull/4826
1,335,987,583
PR_kwDODunzps49B0V3
4,826
Fix language tags in dataset cards
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "The non-passing tests are caused by other missing information in the dataset cards." ]
2022-08-11T13:47:14
2022-08-11T14:17:48
2022-08-11T14:03:12
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4826", "html_url": "https://github.com/huggingface/datasets/pull/4826", "diff_url": "https://github.com/huggingface/datasets/pull/4826.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4826.patch", "merged_at": "2022-08-11T14:03:12" }
Fix language tags in all dataset cards, so that they are validated (aligned with our `languages.json` resource).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4826/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4826/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4825
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4825/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4825/comments
https://api.github.com/repos/huggingface/datasets/issues/4825/events
https://github.com/huggingface/datasets/pull/4825
1,335,856,882
PR_kwDODunzps49BYWL
4,825
[Windows] Fix Access Denied when using os.rename()
{ "login": "DougTrajano", "id": 8703022, "node_id": "MDQ6VXNlcjg3MDMwMjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8703022?v=4", "gravatar_id": "", "url": "https://api.github.com/users/DougTrajano", "html_url": "https://github.com/DougTrajano", "followers_url": "https://api.github.com/users/DougTrajano/followers", "following_url": "https://api.github.com/users/DougTrajano/following{/other_user}", "gists_url": "https://api.github.com/users/DougTrajano/gists{/gist_id}", "starred_url": "https://api.github.com/users/DougTrajano/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DougTrajano/subscriptions", "organizations_url": "https://api.github.com/users/DougTrajano/orgs", "repos_url": "https://api.github.com/users/DougTrajano/repos", "events_url": "https://api.github.com/users/DougTrajano/events{/privacy}", "received_events_url": "https://api.github.com/users/DougTrajano/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Cool thank you ! Maybe we can just replace `os.rename` by `shutil.move` instead ?", "> Cool thank you ! Maybe we can just replace `os.rename` by `shutil.move` instead ?\r\n\r\nYes, I think that could be a better solution, but I didn't test it in Linux (e.g. Ubuntu) to guarantee that `os.rename()` could be completely replaced by `shutil.move()`.", "AFAIK `shutil.move` does call `os.rename` first before doing extra work to make it work on windows, so this is should be a safe safe change for linux ;)", "> AFAIK `shutil.move` does call `os.rename` first before doing extra work to make it work on windows, so this is should be a safe safe change for linux ;)\r\n\r\nalright, let me change the PR then.", "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4825). All of your documentation changes will be reflected on that endpoint.", "Hi @lhoestq looks like one of the tests failed, but is not related to this change, do I need to do something from my side?" ]
2022-08-11T11:57:15
2022-08-24T13:09:07
2022-08-24T13:09:07
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4825", "html_url": "https://github.com/huggingface/datasets/pull/4825", "diff_url": "https://github.com/huggingface/datasets/pull/4825.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4825.patch", "merged_at": "2022-08-24T13:09:07" }
In this PR, we are including an additional step when `os.rename()` raises a PermissionError. Basically, we will use `shutil.move()` on the temp files. Fix #2937
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4825/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4825/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4824
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4824/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4824/comments
https://api.github.com/repos/huggingface/datasets/issues/4824/events
https://github.com/huggingface/datasets/pull/4824
1,335,826,639
PR_kwDODunzps49BR5H
4,824
Fix titles in dataset cards
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "The non-passing tests are caused by other missing information in the dataset cards." ]
2022-08-11T11:27:48
2022-08-11T13:46:11
2022-08-11T12:56:49
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4824", "html_url": "https://github.com/huggingface/datasets/pull/4824", "diff_url": "https://github.com/huggingface/datasets/pull/4824.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4824.patch", "merged_at": "2022-08-11T12:56:49" }
Fix all the titles in the dataset cards, so that they conform to the required format.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4824/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4824/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4823
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4823/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4823/comments
https://api.github.com/repos/huggingface/datasets/issues/4823/events
https://github.com/huggingface/datasets/pull/4823
1,335,687,033
PR_kwDODunzps49A0O_
4,823
Update data URL in mkqa dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-11T09:16:13
2022-08-11T09:51:50
2022-08-11T09:37:52
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4823", "html_url": "https://github.com/huggingface/datasets/pull/4823", "diff_url": "https://github.com/huggingface/datasets/pull/4823.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4823.patch", "merged_at": "2022-08-11T09:37:51" }
Update data URL in mkqa dataset. Fix #4817.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4823/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4823/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4822
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4822/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4822/comments
https://api.github.com/repos/huggingface/datasets/issues/4822/events
https://github.com/huggingface/datasets/issues/4822
1,335,675,352
I_kwDODunzps5PnMnY
4,822
Moving dataset between namespaces breaks dataset viewer
{ "login": "cakiki", "id": 3664563, "node_id": "MDQ6VXNlcjM2NjQ1NjM=", "avatar_url": "https://avatars.githubusercontent.com/u/3664563?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cakiki", "html_url": "https://github.com/cakiki", "followers_url": "https://api.github.com/users/cakiki/followers", "following_url": "https://api.github.com/users/cakiki/following{/other_user}", "gists_url": "https://api.github.com/users/cakiki/gists{/gist_id}", "starred_url": "https://api.github.com/users/cakiki/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cakiki/subscriptions", "organizations_url": "https://api.github.com/users/cakiki/orgs", "repos_url": "https://api.github.com/users/cakiki/repos", "events_url": "https://api.github.com/users/cakiki/events{/privacy}", "received_events_url": "https://api.github.com/users/cakiki/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
[ "Let's keep open for now. We should try to reproduce" ]
2022-08-11T09:05:30
2022-09-16T20:03:09
null
CONTRIBUTOR
null
null
null
## Describe the bug I moved a dataset from my own namespace to an org and that broke the dataset viewer. To fix it I had to manually edit the `dataset_info.json` file and change the first key in the json from `username--datasetname` to `orgname--datasetname` ## Steps to reproduce the bug What I did was: 1- Upload a dataset to my own namespace using `push_to_hub` 2- Move the dataset from my namespace to an org using the web interface. ## Expected results For the file to be changed accordingly. ## Actual results Broken dataset viewer. ## Environment info - `datasets` version: 2.3.3.dev0 - Platform: Linux-4.15.0-189-generic-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.5 - PyArrow version: 7.0.0 - Pandas version: 1.3.5
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4822/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4822/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4821
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4821/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4821/comments
https://api.github.com/repos/huggingface/datasets/issues/4821/events
https://github.com/huggingface/datasets/pull/4821
1,335,664,588
PR_kwDODunzps49AvaE
4,821
Fix train_test_split docs
{ "login": "NielsRogge", "id": 48327001, "node_id": "MDQ6VXNlcjQ4MzI3MDAx", "avatar_url": "https://avatars.githubusercontent.com/u/48327001?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NielsRogge", "html_url": "https://github.com/NielsRogge", "followers_url": "https://api.github.com/users/NielsRogge/followers", "following_url": "https://api.github.com/users/NielsRogge/following{/other_user}", "gists_url": "https://api.github.com/users/NielsRogge/gists{/gist_id}", "starred_url": "https://api.github.com/users/NielsRogge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NielsRogge/subscriptions", "organizations_url": "https://api.github.com/users/NielsRogge/orgs", "repos_url": "https://api.github.com/users/NielsRogge/repos", "events_url": "https://api.github.com/users/NielsRogge/events{/privacy}", "received_events_url": "https://api.github.com/users/NielsRogge/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-11T08:55:45
2022-08-11T09:59:29
2022-08-11T09:45:40
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4821", "html_url": "https://github.com/huggingface/datasets/pull/4821", "diff_url": "https://github.com/huggingface/datasets/pull/4821.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4821.patch", "merged_at": "2022-08-11T09:45:40" }
I saw that `stratify` is added to the `train_test_split` method as per #4322, hence the docs can be updated.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4821/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4821/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4820
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4820/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4820/comments
https://api.github.com/repos/huggingface/datasets/issues/4820/events
https://github.com/huggingface/datasets/issues/4820
1,335,117,132
I_kwDODunzps5PlEVM
4,820
Terminating: fork() called from a process already using GNU OpenMP, this is unsafe.
{ "login": "talhaanwarch", "id": 37379131, "node_id": "MDQ6VXNlcjM3Mzc5MTMx", "avatar_url": "https://avatars.githubusercontent.com/u/37379131?v=4", "gravatar_id": "", "url": "https://api.github.com/users/talhaanwarch", "html_url": "https://github.com/talhaanwarch", "followers_url": "https://api.github.com/users/talhaanwarch/followers", "following_url": "https://api.github.com/users/talhaanwarch/following{/other_user}", "gists_url": "https://api.github.com/users/talhaanwarch/gists{/gist_id}", "starred_url": "https://api.github.com/users/talhaanwarch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/talhaanwarch/subscriptions", "organizations_url": "https://api.github.com/users/talhaanwarch/orgs", "repos_url": "https://api.github.com/users/talhaanwarch/repos", "events_url": "https://api.github.com/users/talhaanwarch/events{/privacy}", "received_events_url": "https://api.github.com/users/talhaanwarch/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Fixed by installing either resampy<3 or resampy>=4" ]
2022-08-10T19:42:33
2022-08-10T19:53:10
2022-08-10T19:53:10
NONE
null
null
null
Hi, when I try to run the prepare_dataset function in [fine tuning ASR tutorial 4](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_tuning_Wav2Vec2_for_English_ASR.ipynb), I get this error: Terminating: fork() called from a process already using GNU OpenMP, this is unsafe. There are no other logs available, so I have no clue what the cause of it is. ``` def prepare_dataset(batch): audio = batch["path"] # batched output is "un-batched" batch["input_values"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0] batch["input_length"] = len(batch["input_values"]) with processor.as_target_processor(): batch["labels"] = processor(batch["text"]).input_ids return batch data = data.map(prepare_dataset, remove_columns=data.column_names["train"], num_proc=4) ``` Specify the actual results or traceback. There is no traceback except `Terminating: fork() called from a process already using GNU OpenMP, this is unsafe.` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.4.0 - Platform: Linux-5.15.0-43-generic-x86_64-with-glibc2.29 - Python version: 3.8.10 - PyArrow version: 9.0.0 - Pandas version: 1.4.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4820/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4820/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4819
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4819/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4819/comments
https://api.github.com/repos/huggingface/datasets/issues/4819/events
https://github.com/huggingface/datasets/pull/4819
1,335,064,449
PR_kwDODunzps48-xc6
4,819
Add missing language tags to resources
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-10T19:06:42
2022-08-10T19:45:49
2022-08-10T19:32:15
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4819", "html_url": "https://github.com/huggingface/datasets/pull/4819", "diff_url": "https://github.com/huggingface/datasets/pull/4819.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4819.patch", "merged_at": "2022-08-10T19:32:15" }
Add missing language tags to resources, required by existing datasets on GitHub.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4819/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4819/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4818
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4818/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4818/comments
https://api.github.com/repos/huggingface/datasets/issues/4818/events
https://github.com/huggingface/datasets/pull/4818
1,334,941,810
PR_kwDODunzps48-W7a
4,818
Add add cc-by-sa-2.5 license tag
{ "login": "polinaeterna", "id": 16348744, "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "gravatar_id": "", "url": "https://api.github.com/users/polinaeterna", "html_url": "https://github.com/polinaeterna", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "repos_url": "https://api.github.com/users/polinaeterna/repos", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4818). All of your documentation changes will be reflected on that endpoint.", "I think we can close this PR because the `standard_licenses.tsv` file was removed from this repo and we no longer perform any dataset card validation." ]
2022-08-10T17:18:39
2022-10-04T13:47:24
2022-10-04T13:47:24
CONTRIBUTOR
null
true
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4818", "html_url": "https://github.com/huggingface/datasets/pull/4818", "diff_url": "https://github.com/huggingface/datasets/pull/4818.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4818.patch", "merged_at": null }
- [ ] add it to moon-landing - [ ] add it to hub-docs
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4818/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4818/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4817
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4817/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4817/comments
https://api.github.com/repos/huggingface/datasets/issues/4817/events
https://github.com/huggingface/datasets/issues/4817
1,334,572,163
I_kwDODunzps5Pi_SD
4,817
Outdated Link for mkqa Dataset
{ "login": "liaeh", "id": 52380283, "node_id": "MDQ6VXNlcjUyMzgwMjgz", "avatar_url": "https://avatars.githubusercontent.com/u/52380283?v=4", "gravatar_id": "", "url": "https://api.github.com/users/liaeh", "html_url": "https://github.com/liaeh", "followers_url": "https://api.github.com/users/liaeh/followers", "following_url": "https://api.github.com/users/liaeh/following{/other_user}", "gists_url": "https://api.github.com/users/liaeh/gists{/gist_id}", "starred_url": "https://api.github.com/users/liaeh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/liaeh/subscriptions", "organizations_url": "https://api.github.com/users/liaeh/orgs", "repos_url": "https://api.github.com/users/liaeh/repos", "events_url": "https://api.github.com/users/liaeh/events{/privacy}", "received_events_url": "https://api.github.com/users/liaeh/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting @liaeh, we are investigating this. " ]
2022-08-10T12:45:45
2022-08-11T09:37:52
2022-08-11T09:37:52
NONE
null
null
null
## Describe the bug The URL used to download the mkqa dataset is outdated. It seems the URL to download the dataset is currently https://github.com/apple/ml-mkqa/blob/main/dataset/mkqa.jsonl.gz instead of https://github.com/apple/ml-mkqa/raw/master/dataset/mkqa.jsonl.gz (master branch has been renamed to main). ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("mkqa") ``` ## Expected results downloads the dataset ## Actual results ```python Downloading builder script: 4.79k/? [00:00<00:00, 201kB/s] Downloading metadata: 13.2k/? [00:00<00:00, 504kB/s] Downloading and preparing dataset mkqa/mkqa (download: 11.35 MiB, generated: 34.29 MiB, post-processed: Unknown size, total: 45.65 MiB) to /home/lhr/.cache/huggingface/datasets/mkqa/mkqa/1.0.0/5401489c674c81257cf563417aaaa5de2c7e26a1090ce9b10eb0404f10003d4d... Downloading data files: 0% 0/1 [00:00<?, ?it/s] --------------------------------------------------------------------------- FileNotFoundError Traceback (most recent call last) Input In [3], in <cell line: 3>() 1 from datasets import load_dataset ----> 3 dataset = load_dataset("mkqa") File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/load.py:1746, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs) 1743 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES 1745 # Download and prepare data -> 1746 builder_instance.download_and_prepare( 1747 download_config=download_config, 1748 download_mode=download_mode, 1749 ignore_verifications=ignore_verifications, 1750 try_from_hf_gcs=try_from_hf_gcs, 1751 use_auth_token=use_auth_token, 1752 ) 1754 # Build dataset for splits 1755 keep_in_memory = ( 1756 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) 1757 ) File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/builder.py:704, in DatasetBuilder.download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 702 logger.warning("HF google storage unreachable. Downloading and preparing it from source") 703 if not downloaded_from_gcs: --> 704 self._download_and_prepare( 705 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 706 ) 707 # Sync info 708 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values()) File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/builder.py:1227, in GeneratorBasedBuilder._download_and_prepare(self, dl_manager, verify_infos) 1226 def _download_and_prepare(self, dl_manager, verify_infos): -> 1227 super()._download_and_prepare(dl_manager, verify_infos, check_duplicate_keys=verify_infos) File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/builder.py:771, in DatasetBuilder._download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 769 split_dict = SplitDict(dataset_name=self.name) 770 split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs) --> 771 split_generators = self._split_generators(dl_manager, **split_generators_kwargs) 773 # Checksums verification 774 if verify_infos and dl_manager.record_checksums: File ~/.cache/huggingface/modules/datasets_modules/datasets/mkqa/5401489c674c81257cf563417aaaa5de2c7e26a1090ce9b10eb0404f10003d4d/mkqa.py:130, in Mkqa._split_generators(self, dl_manager) 128 # download and extract URLs 129 urls_to_download = _URLS --> 130 downloaded_files = dl_manager.download_and_extract(urls_to_download) 132 return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})] File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/download/download_manager.py:431, in DownloadManager.download_and_extract(self, url_or_urls) 415 def download_and_extract(self, url_or_urls): 416 """Download and extract given url_or_urls. 417 418 Is roughly equivalent to: (...) 429 extracted_path(s): `str`, extracted paths of given URL(s). 430 """ --> 431 return self.extract(self.download(url_or_urls)) File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/download/download_manager.py:309, in DownloadManager.download(self, url_or_urls) 306 download_func = partial(self._download, download_config=download_config) 308 start_time = datetime.now() --> 309 downloaded_path_or_paths = map_nested( 310 download_func, 311 url_or_urls, 312 map_tuple=True, 313 num_proc=download_config.num_proc, 314 disable_tqdm=not is_progress_bar_enabled(), 315 desc="Downloading data files", 316 ) 317 duration = datetime.now() - start_time 318 logger.info(f"Downloading took {duration.total_seconds() // 60} min") File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/utils/py_utils.py:393, in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, types, disable_tqdm, desc) 391 num_proc = 1 392 if num_proc <= 1 or len(iterable) <= num_proc: --> 393 mapped = [ 394 _single_map_nested((function, obj, types, None, True, None)) 395 for obj in logging.tqdm(iterable, disable=disable_tqdm, desc=desc) 396 ] 397 else: 398 split_kwds = [] # We organize the splits ourselve (contiguous splits) File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/utils/py_utils.py:394, in <listcomp>(.0) 391 num_proc = 1 392 if num_proc <= 1 or len(iterable) <= num_proc: 393 mapped = [ --> 394 _single_map_nested((function, obj, types, None, True, None)) 395 for obj in logging.tqdm(iterable, disable=disable_tqdm, desc=desc) 396 ] 397 else: 398 split_kwds = [] # We organize the splits ourselve (contiguous splits) File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/utils/py_utils.py:330, in _single_map_nested(args) 328 # Singleton first to spare some computation 329 if not isinstance(data_struct, dict) and not isinstance(data_struct, types): --> 330 return function(data_struct) 332 # Reduce logging to keep things readable in multiprocessing with tqdm 333 if rank is not None and logging.get_verbosity() < logging.WARNING: File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/download/download_manager.py:335, in DownloadManager._download(self, url_or_filename, download_config) 332 if is_relative_path(url_or_filename): 333 # append the relative path to the base_path 334 url_or_filename = url_or_path_join(self._base_path, url_or_filename) --> 335 return cached_path(url_or_filename, download_config=download_config) File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/utils/file_utils.py:185, in cached_path(url_or_filename, download_config, **download_kwargs) 181 url_or_filename = str(url_or_filename) 183 if is_remote_url(url_or_filename): 184 # URL, so get it from the cache (downloading if necessary) --> 185 output_path = get_from_cache( 186 url_or_filename, 187 cache_dir=cache_dir, 188 force_download=download_config.force_download, 189 proxies=download_config.proxies, 190 resume_download=download_config.resume_download, 191 user_agent=download_config.user_agent, 192 local_files_only=download_config.local_files_only, 193 use_etag=download_config.use_etag, 194 max_retries=download_config.max_retries, 195 use_auth_token=download_config.use_auth_token, 196 ignore_url_params=download_config.ignore_url_params, 197 download_desc=download_config.download_desc, 198 ) 199 elif os.path.exists(url_or_filename): 200 # File, and it exists. 201 output_path = url_or_filename File ~/repos/punc-cap/venv/lib/python3.9/site-packages/datasets/utils/file_utils.py:530, in get_from_cache(url, cache_dir, force_download, proxies, etag_timeout, resume_download, user_agent, local_files_only, use_etag, max_retries, use_auth_token, ignore_url_params, download_desc) 525 raise FileNotFoundError( 526 f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been" 527 " disabled. To enable file online look-ups, set 'local_files_only' to False." 528 ) 529 elif response is not None and response.status_code == 404: --> 530 raise FileNotFoundError(f"Couldn't find file at {url}") 531 _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") 532 if head_error is not None: FileNotFoundError: Couldn't find file at https://github.com/apple/ml-mkqa/raw/master/dataset/mkqa.jsonl.gz ``` ## Environment info - `datasets` version: 2.4.0 - Platform: Linux-5.13.0-40-generic-x86_64-with-glibc2.31 - Python version: 3.9.7 - PyArrow version: 9.0.0 - Pandas version: 1.4.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4817/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4817/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4816
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4816/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4816/comments
https://api.github.com/repos/huggingface/datasets/issues/4816/events
https://github.com/huggingface/datasets/pull/4816
1,334,099,454
PR_kwDODunzps487kpq
4,816
Update version of opus_paracrawl dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-10T05:39:44
2022-08-12T14:32:29
2022-08-12T14:17:56
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4816", "html_url": "https://github.com/huggingface/datasets/pull/4816", "diff_url": "https://github.com/huggingface/datasets/pull/4816.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4816.patch", "merged_at": "2022-08-12T14:17:56" }
This PR updates OPUS ParaCrawl from version 7.1 to version 9. Fix #4815.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4816/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4816/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4815
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4815/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4815/comments
https://api.github.com/repos/huggingface/datasets/issues/4815/events
https://github.com/huggingface/datasets/issues/4815
1,334,078,303
I_kwDODunzps5PhGtf
4,815
Outdated loading script for OPUS ParaCrawl dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 2067388877, "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug", "name": "dataset bug", "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
2022-08-10T05:12:34
2022-08-12T14:17:57
2022-08-12T14:17:57
MEMBER
null
null
null
## Describe the bug Our loading script for OPUS ParaCrawl loads version 7.1. The currently available version is 9.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4815/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4815/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4814
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4814/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4814/comments
https://api.github.com/repos/huggingface/datasets/issues/4814/events
https://github.com/huggingface/datasets/issues/4814
1,333,356,230
I_kwDODunzps5PeWbG
4,814
Support CSV as metadata file format in AudioFolder/ImageFolder
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false } ]
null
[]
2022-08-09T14:36:49
2022-08-31T11:59:08
2022-08-31T11:59:08
CONTRIBUTOR
null
null
null
Requested here: https://discuss.huggingface.co/t/how-to-structure-an-image-dataset-repo-using-the-image-folder-approach/21004. CSV is also used in AutoTrain for specifying metadata in image datasets.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4814/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4814/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4813
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4813/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4813/comments
https://api.github.com/repos/huggingface/datasets/issues/4813/events
https://github.com/huggingface/datasets/pull/4813
1,333,287,756
PR_kwDODunzps48446r
4,813
Fix loading example in opus dataset cards
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-09T13:47:38
2022-08-09T17:52:15
2022-08-09T17:38:18
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4813", "html_url": "https://github.com/huggingface/datasets/pull/4813", "diff_url": "https://github.com/huggingface/datasets/pull/4813.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4813.patch", "merged_at": "2022-08-09T17:38:18" }
This PR: - fixes the examples to load the datasets, with the corrected dataset name, in their dataset cards for: - opus_dgt - opus_paracrawl - opus_wikipedia - fixes their dataset cards with the missing required information: title, data instances/fields/splits - enumerates the supported languages - adds a missing citation reference for opus_wikipedia Related to: - #4806
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4813/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4813/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4812
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4812/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4812/comments
https://api.github.com/repos/huggingface/datasets/issues/4812/events
https://github.com/huggingface/datasets/pull/4812
1,333,051,730
PR_kwDODunzps484Fzq
4,812
Fix bug in function validate_type for Python >= 3.9
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-09T10:32:42
2022-08-12T13:41:23
2022-08-12T13:27:04
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4812", "html_url": "https://github.com/huggingface/datasets/pull/4812", "diff_url": "https://github.com/huggingface/datasets/pull/4812.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4812.patch", "merged_at": "2022-08-12T13:27:04" }
Fix `validate_type` function, so that it uses `get_origin` instead. This makes the function forward compatible. This fixes #4811 because: ```python In [4]: typing.Optional[str] Out[4]: typing.Optional[str] In [5]: get_origin(typing.Optional[str]) Out[5]: typing.Union ``` Fix #4811.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4812/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4812/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4811
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4811/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4811/comments
https://api.github.com/repos/huggingface/datasets/issues/4811/events
https://github.com/huggingface/datasets/issues/4811
1,333,043,421
I_kwDODunzps5PdKDd
4,811
Bug in function validate_type for Python >= 3.9
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
2022-08-09T10:25:21
2022-08-12T13:27:05
2022-08-12T13:27:05
MEMBER
null
null
null
## Describe the bug The function `validate_type` assumes that the type `typing.Optional[str]` is automatically transformed to `typing.Union[str, NoneType]`. ```python In [4]: typing.Optional[str] Out[4]: typing.Union[str, NoneType] ``` However, this is not the case for Python 3.9: ```python In [3]: typing.Optional[str] Out[3]: typing.Optional[str] ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4811/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4811/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4810
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4810/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4810/comments
https://api.github.com/repos/huggingface/datasets/issues/4810/events
https://github.com/huggingface/datasets/pull/4810
1,333,038,702
PR_kwDODunzps484C9l
4,810
Add description to hellaswag dataset
{ "login": "julien-c", "id": 326577, "node_id": "MDQ6VXNlcjMyNjU3Nw==", "avatar_url": "https://avatars.githubusercontent.com/u/326577?v=4", "gravatar_id": "", "url": "https://api.github.com/users/julien-c", "html_url": "https://github.com/julien-c", "followers_url": "https://api.github.com/users/julien-c/followers", "following_url": "https://api.github.com/users/julien-c/following{/other_user}", "gists_url": "https://api.github.com/users/julien-c/gists{/gist_id}", "starred_url": "https://api.github.com/users/julien-c/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/julien-c/subscriptions", "organizations_url": "https://api.github.com/users/julien-c/orgs", "repos_url": "https://api.github.com/users/julien-c/repos", "events_url": "https://api.github.com/users/julien-c/events{/privacy}", "received_events_url": "https://api.github.com/users/julien-c/received_events", "type": "User", "site_admin": false }
[ { "id": 4564477500, "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution", "name": "dataset contribution", "color": "0e8a16", "default": false, "description": "Contribution to a dataset script" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Are the `metadata JSON file` not on their way to deprecation? 😆😇\r\n\r\nIMO, more generally than this particular PR, the contribution process should be simplified now that many validation checks happen on the hub side.\r\n\r\nKeeping this open in the meantime to get more potential feedback!" ]
2022-08-09T10:21:14
2022-09-23T11:35:38
2022-09-23T11:33:44
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4810", "html_url": "https://github.com/huggingface/datasets/pull/4810", "diff_url": "https://github.com/huggingface/datasets/pull/4810.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4810.patch", "merged_at": "2022-09-23T11:33:44" }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4810/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4810/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4809
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4809/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4809/comments
https://api.github.com/repos/huggingface/datasets/issues/4809/events
https://github.com/huggingface/datasets/pull/4809
1,332,842,747
PR_kwDODunzps483Y4h
4,809
Complete the mlqa dataset card
{ "login": "el2e10", "id": 7940237, "node_id": "MDQ6VXNlcjc5NDAyMzc=", "avatar_url": "https://avatars.githubusercontent.com/u/7940237?v=4", "gravatar_id": "", "url": "https://api.github.com/users/el2e10", "html_url": "https://github.com/el2e10", "followers_url": "https://api.github.com/users/el2e10/followers", "following_url": "https://api.github.com/users/el2e10/following{/other_user}", "gists_url": "https://api.github.com/users/el2e10/gists{/gist_id}", "starred_url": "https://api.github.com/users/el2e10/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/el2e10/subscriptions", "organizations_url": "https://api.github.com/users/el2e10/orgs", "repos_url": "https://api.github.com/users/el2e10/repos", "events_url": "https://api.github.com/users/el2e10/events{/privacy}", "received_events_url": "https://api.github.com/users/el2e10/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "> Thanks for your contribution, @eldhoittangeorge.\r\n> \r\n> The CI error message: https://github.com/huggingface/datasets/runs/7743526624?check_suite_focus=true\r\n> \r\n> ```\r\n> E ValueError: The following issues have been found in the dataset cards:\r\n> E YAML tags:\r\n> E __init__() missing 5 required positional arguments: 'annotations_creators', 'language_creators', 'license', 'size_categories', and 'source_datasets'\r\n> ```\r\n\r\nI will fix the CI error.", "@eldhoittangeorge, thanks again for all the fixes. Just a minor one before we can merge this PR: https://github.com/huggingface/datasets/runs/7744885754?check_suite_focus=true\r\n```\r\nE YAML tags:\r\nE Could not validate the metadata, found the following errors:\r\nE * field 'language_creators':\r\nE \t['unknown'] are not registered tags for 'language_creators', reference at https://github.com/huggingface/datasets/tree/main/src/datasets/utils/resources/creators.json\r\n```", "> \r\n\r\nThanks, I updated the file. \r\nA small suggestion can you mention this link https://github.com/huggingface/datasets/tree/main/src/datasets/utils/resources/ in the contribution page. So that others will know the acceptable values for the tags." ]
2022-08-09T07:38:06
2022-08-09T16:26:21
2022-08-09T13:26:43
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4809", "html_url": "https://github.com/huggingface/datasets/pull/4809", "diff_url": "https://github.com/huggingface/datasets/pull/4809.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4809.patch", "merged_at": "2022-08-09T13:26:43" }
I fixed the issue #4808. Details of the PR: - Added the languages included in the dataset. - Added the task id and task category. - Updated the citation information. Fix #4808.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4809/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4809/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4808
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4808/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4808/comments
https://api.github.com/repos/huggingface/datasets/issues/4808/events
https://github.com/huggingface/datasets/issues/4808
1,332,840,217
I_kwDODunzps5PcYcZ
4,808
Add more information to the dataset card of mlqa dataset
{ "login": "el2e10", "id": 7940237, "node_id": "MDQ6VXNlcjc5NDAyMzc=", "avatar_url": "https://avatars.githubusercontent.com/u/7940237?v=4", "gravatar_id": "", "url": "https://api.github.com/users/el2e10", "html_url": "https://github.com/el2e10", "followers_url": "https://api.github.com/users/el2e10/followers", "following_url": "https://api.github.com/users/el2e10/following{/other_user}", "gists_url": "https://api.github.com/users/el2e10/gists{/gist_id}", "starred_url": "https://api.github.com/users/el2e10/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/el2e10/subscriptions", "organizations_url": "https://api.github.com/users/el2e10/orgs", "repos_url": "https://api.github.com/users/el2e10/repos", "events_url": "https://api.github.com/users/el2e10/events{/privacy}", "received_events_url": "https://api.github.com/users/el2e10/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "el2e10", "id": 7940237, "node_id": "MDQ6VXNlcjc5NDAyMzc=", "avatar_url": "https://avatars.githubusercontent.com/u/7940237?v=4", "gravatar_id": "", "url": "https://api.github.com/users/el2e10", "html_url": "https://github.com/el2e10", "followers_url": "https://api.github.com/users/el2e10/followers", "following_url": "https://api.github.com/users/el2e10/following{/other_user}", "gists_url": "https://api.github.com/users/el2e10/gists{/gist_id}", "starred_url": "https://api.github.com/users/el2e10/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/el2e10/subscriptions", "organizations_url": "https://api.github.com/users/el2e10/orgs", "repos_url": "https://api.github.com/users/el2e10/repos", "events_url": "https://api.github.com/users/el2e10/events{/privacy}", "received_events_url": "https://api.github.com/users/el2e10/received_events", "type": "User", "site_admin": false }
[ { "login": "el2e10", "id": 7940237, "node_id": "MDQ6VXNlcjc5NDAyMzc=", "avatar_url": "https://avatars.githubusercontent.com/u/7940237?v=4", "gravatar_id": "", "url": "https://api.github.com/users/el2e10", "html_url": "https://github.com/el2e10", "followers_url": "https://api.github.com/users/el2e10/followers", "following_url": "https://api.github.com/users/el2e10/following{/other_user}", "gists_url": "https://api.github.com/users/el2e10/gists{/gist_id}", "starred_url": "https://api.github.com/users/el2e10/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/el2e10/subscriptions", "organizations_url": "https://api.github.com/users/el2e10/orgs", "repos_url": "https://api.github.com/users/el2e10/repos", "events_url": "https://api.github.com/users/el2e10/events{/privacy}", "received_events_url": "https://api.github.com/users/el2e10/received_events", "type": "User", "site_admin": false } ]
null
[ "#self-assign", "Fixed by:\r\n- #4809" ]
2022-08-09T07:35:42
2022-08-09T13:33:23
2022-08-09T13:33:23
CONTRIBUTOR
null
null
null
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4808/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4808/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4807
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4807/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4807/comments
https://api.github.com/repos/huggingface/datasets/issues/4807/events
https://github.com/huggingface/datasets/pull/4807
1,332,784,110
PR_kwDODunzps483MSH
4,807
document fix in opus_gnome dataset
{ "login": "gojiteji", "id": 38291975, "node_id": "MDQ6VXNlcjM4MjkxOTc1", "avatar_url": "https://avatars.githubusercontent.com/u/38291975?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gojiteji", "html_url": "https://github.com/gojiteji", "followers_url": "https://api.github.com/users/gojiteji/followers", "following_url": "https://api.github.com/users/gojiteji/following{/other_user}", "gists_url": "https://api.github.com/users/gojiteji/gists{/gist_id}", "starred_url": "https://api.github.com/users/gojiteji/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gojiteji/subscriptions", "organizations_url": "https://api.github.com/users/gojiteji/orgs", "repos_url": "https://api.github.com/users/gojiteji/repos", "events_url": "https://api.github.com/users/gojiteji/events{/privacy}", "received_events_url": "https://api.github.com/users/gojiteji/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Duplicate:\r\n- #4806 " ]
2022-08-09T06:38:13
2022-08-09T07:28:03
2022-08-09T07:28:03
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4807", "html_url": "https://github.com/huggingface/datasets/pull/4807", "diff_url": "https://github.com/huggingface/datasets/pull/4807.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4807.patch", "merged_at": null }
I fixed issue #4805. I changed `"gnome"` to `"opus_gnome"` in [README.md](https://github.com/huggingface/datasets/tree/main/datasets/opus_gnome#dataset-summary).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4807/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4807/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4806
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4806/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4806/comments
https://api.github.com/repos/huggingface/datasets/issues/4806/events
https://github.com/huggingface/datasets/pull/4806
1,332,664,038
PR_kwDODunzps482yiS
4,806
Fix opus_gnome dataset card
{ "login": "gojiteji", "id": 38291975, "node_id": "MDQ6VXNlcjM4MjkxOTc1", "avatar_url": "https://avatars.githubusercontent.com/u/38291975?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gojiteji", "html_url": "https://github.com/gojiteji", "followers_url": "https://api.github.com/users/gojiteji/followers", "following_url": "https://api.github.com/users/gojiteji/following{/other_user}", "gists_url": "https://api.github.com/users/gojiteji/gists{/gist_id}", "starred_url": "https://api.github.com/users/gojiteji/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gojiteji/subscriptions", "organizations_url": "https://api.github.com/users/gojiteji/orgs", "repos_url": "https://api.github.com/users/gojiteji/repos", "events_url": "https://api.github.com/users/gojiteji/events{/privacy}", "received_events_url": "https://api.github.com/users/gojiteji/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "@gojiteji why have you closed this PR and created an identical one?\r\n- #4807 ", "@albertvillanova \r\nI forgot to follow \"How to create a Pull\" in CONTRIBUTING.md in this branch.", "Both are identical. And you can push additional commits to this branch.", "I see. Thank you for your comment.", "Anyway, @gojiteji thanks for your contribution and this fix.", "Once you have modified the `opus_gnome` dataset card, our Continuous Integration test suite performs some tests on it that make some additional requirements: the errors that appear have nothing to do with your contribution, but with these additional quality requirements.", "> the errors that appear have nothing to do with your contribution, but with these additional quality requirements.\r\n\r\nIs there anything I should do?", "If you would like to address them as well in this PR, it would be awesome: https://github.com/huggingface/datasets/runs/7741104780?check_suite_focus=true\r\n", "These are the 2 error messages:\r\n```\r\nE ValueError: The following issues have been found in the dataset cards:\r\nE README Validation:\r\nE The following issues were found for the README at `/home/runner/work/datasets/datasets/datasets/opus_gnome/README.md`:\r\nE -\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.\r\n\r\nE The following issues have been found in the dataset cards:\r\nE YAML tags:\r\nE Could not validate the metadata, found the following errors:\r\nE * field 'language':\r\nE \t['ara', 'cat', 'foo', 'gr', 'nqo', 'tmp'] are not registered tags for 'language', reference at https://github.com/huggingface/datasets/tree/main/src/datasets/utils/resources/languages.json\r\n```", "In principle there are 2 errors:\r\n\r\nThe first one says, the title of the README does not start with `Dataset Card for`:\r\n- The README title is: `# Dataset Card Creation Guide`\r\n- According to the [template here](https://github.com/huggingface/datasets/blob/main/templates/README.md), it should be: `# Dataset Card for [Dataset Name]`", "In relation with the languages:\r\n- you should check whether the language codes are properly spelled\r\n- and if so, adding them to our `languages.json` file, so that they are properly validated", "Thank you for the detailed information. I'm checking it now.", "```\r\nE ValueError: The following issues have been found in the dataset cards:\r\nE README Validation:\r\nE The following issues were found for the README at `/home/runner/work/datasets/datasets/datasets/opus_gnome/README.md`:\r\nE -\tExpected some content in section `Data Instances` but it is empty.\r\nE -\tExpected some content in section `Data Fields` but it is empty.\r\nE -\tExpected some content in section `Data Splits` but it is empty.\r\n```", "I added `ara`, `cat`, `gr`, and `nqo` to `languages.json` and removed `foo` and `tmp` from `README.md`.\r\nI also write Data Instances, Data Fields, and Data Splits in `README.md`.", "Thanks for your investigation and fixes to the dataset card structure! I'm just making some suggestions before merging this PR: see below.", "Should I create PR for `config.json` to add ` ara cat gr nqo` first?\r\nI think I can pass this failing after that.\r\n\r\nOr removing `ara, cat, gr, nqo, foo, tmp` from `README.md`. 
", "Once you address these issues, all the CI tests will pass.", "Once the remaining changes are addressed (see unresolved above), we will be able to merge this:\r\n- [ ] Remove \"ara\" from README\r\n- [ ] Remove \"cat\" from README\r\n- [ ] Remove \"gr\" from README\r\n- [ ] Replace \"tmp\" with \"tyj\" in README\r\n- [ ] Add \"tyj\" to `languages.json`:\r\n ```\r\n \"tyj\": \"Tai Do; Tai Yo\",", "I did the five changes." ]
2022-08-09T03:40:15
2022-08-09T12:06:46
2022-08-09T11:52:04
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4806", "html_url": "https://github.com/huggingface/datasets/pull/4806", "diff_url": "https://github.com/huggingface/datasets/pull/4806.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4806.patch", "merged_at": "2022-08-09T11:52:04" }
I fixed issue #4805. I changed `"gnome"` to `"opus_gnome"` in [README.md](https://github.com/huggingface/datasets/tree/main/datasets/opus_gnome#dataset-summary). Fix #4805
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4806/reactions", "total_count": 2, "+1": 0, "-1": 0, "laugh": 0, "hooray": 2, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4806/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4805
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4805/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4805/comments
https://api.github.com/repos/huggingface/datasets/issues/4805/events
https://github.com/huggingface/datasets/issues/4805
1,332,653,531
I_kwDODunzps5Pbq3b
4,805
Wrong example in opus_gnome dataset card
{ "login": "gojiteji", "id": 38291975, "node_id": "MDQ6VXNlcjM4MjkxOTc1", "avatar_url": "https://avatars.githubusercontent.com/u/38291975?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gojiteji", "html_url": "https://github.com/gojiteji", "followers_url": "https://api.github.com/users/gojiteji/followers", "following_url": "https://api.github.com/users/gojiteji/following{/other_user}", "gists_url": "https://api.github.com/users/gojiteji/gists{/gist_id}", "starred_url": "https://api.github.com/users/gojiteji/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gojiteji/subscriptions", "organizations_url": "https://api.github.com/users/gojiteji/orgs", "repos_url": "https://api.github.com/users/gojiteji/repos", "events_url": "https://api.github.com/users/gojiteji/events{/privacy}", "received_events_url": "https://api.github.com/users/gojiteji/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[]
2022-08-09T03:21:27
2022-08-09T11:52:05
2022-08-09T11:52:05
CONTRIBUTOR
null
null
null
## Describe the bug I found that [the example on the opus_gnome dataset](https://github.com/huggingface/datasets/tree/main/datasets/opus_gnome#dataset-summary) doesn't work. ## Steps to reproduce the bug ```python load_dataset("gnome", lang1="it", lang2="pl") ``` `"gnome"` should be `"opus_gnome"` ## Expected results ```bash 100% 1/1 [00:00<00:00, 42.09it/s] DatasetDict({ train: Dataset({ features: ['id', 'translation'], num_rows: 8368 }) }) ``` ## Actual results ```bash Couldn't find 'gnome' on the Hugging Face Hub either: FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/main/datasets/gnome/gnome.py ``` ## Environment info - `datasets` version: 2.4.0 - Platform: Linux-5.4.0-120-generic-x86_64-with-glibc2.27 - Python version: 3.9.13 - PyArrow version: 9.0.0 - Pandas version: 1.4.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4805/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4805/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4804
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4804/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4804/comments
https://api.github.com/repos/huggingface/datasets/issues/4804/events
https://github.com/huggingface/datasets/issues/4804
1,332,630,358
I_kwDODunzps5PblNW
4,804
streaming dataset with concatenating splits raises an error
{ "login": "Bing-su", "id": 37621276, "node_id": "MDQ6VXNlcjM3NjIxMjc2", "avatar_url": "https://avatars.githubusercontent.com/u/37621276?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bing-su", "html_url": "https://github.com/Bing-su", "followers_url": "https://api.github.com/users/Bing-su/followers", "following_url": "https://api.github.com/users/Bing-su/following{/other_user}", "gists_url": "https://api.github.com/users/Bing-su/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bing-su/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bing-su/subscriptions", "organizations_url": "https://api.github.com/users/Bing-su/orgs", "repos_url": "https://api.github.com/users/Bing-su/repos", "events_url": "https://api.github.com/users/Bing-su/events{/privacy}", "received_events_url": "https://api.github.com/users/Bing-su/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi! Only the name of a particular split (\"train\", \"test\", ...) is supported as a split pattern if `streaming=True`. We plan to address this limitation soon.", "Hi, have you addressed this yet?", "yes, same error occurs.\r\n```python\r\nfrom datasets import load_dataset\r\n\r\n# error\r\nrepo = \"nateraw/ade20k-tiny\"\r\ndataset = load_dataset(repo, split=\"train+validation\", streaming=True)\r\n```\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n[<ipython-input-3-a6ae02d63899>](https://localhost:8080/#) in <cell line: 5>()\r\n 3 # error\r\n 4 repo = \"nateraw/ade20k-tiny\"\r\n----> 5 dataset = load_dataset(repo, split=\"train+validation\", streaming=True)\r\n\r\n1 frames\r\n[/usr/local/lib/python3.10/dist-packages/datasets/builder.py](https://localhost:8080/#) in as_streaming_dataset(self, split, base_path)\r\n 1265 splits_generator = splits_generators[split]\r\n 1266 else:\r\n-> 1267 raise ValueError(f\"Bad split: {split}. Available splits: {list(splits_generators)}\")\r\n 1268 \r\n 1269 # Create a dataset for each of the given splits\r\n\r\nValueError: Bad split: train+validation. Available splits: ['train', 'validation']\r\n```\r\n\r\ngoogle colab, `datasets==2.12.0`\r\n```\r\n- huggingface_hub version: 0.14.1\r\n- Platform: Linux-5.10.147+-x86_64-with-glibc2.31\r\n- Python version: 3.10.11\r\n- Running in iPython ?: No\r\n- Running in notebook ?: No\r\n- Running in Google Colab ?: No\r\n- Token path ?: /root/.cache/huggingface/token\r\n- Has saved token ?: False\r\n- Configured git credential helpers: \r\n- FastAI: 2.7.12\r\n- Tensorflow: 2.12.0\r\n- Torch: 2.0.0+cu118\r\n- Jinja2: 3.1.2\r\n- Graphviz: 0.20.1\r\n- Pydot: 1.4.2\r\n- Pillow: 8.4.0\r\n- hf_transfer: N/A\r\n- gradio: N/A\r\n- ENDPOINT: https://huggingface.co/\r\n- HUGGINGFACE_HUB_CACHE: /root/.cache/huggingface/hub\r\n- HUGGINGFACE_ASSETS_CACHE: /root/.cache/huggingface/assets\r\n- HF_TOKEN_PATH: /root/.cache/huggingface/token\r\n- HF_HUB_OFFLINE: False\r\n- HF_HUB_DISABLE_TELEMETRY: False\r\n- HF_HUB_DISABLE_PROGRESS_BARS: None\r\n- HF_HUB_DISABLE_SYMLINKS_WARNING: False\r\n- HF_HUB_DISABLE_EXPERIMENTAL_WARNING: False\r\n- HF_HUB_DISABLE_IMPLICIT_TOKEN: False\r\n- HF_HUB_ENABLE_HF_TRANSFER: False\r\n```\r\n" ]
2022-08-09T02:41:56
2023-05-11T01:42:59
null
NONE
null
null
null
## Describe the bug streaming dataset with concatenating splits raises an error ## Steps to reproduce the bug ```python from datasets import load_dataset # no error repo = "nateraw/ade20k-tiny" dataset = load_dataset(repo, split="train+validation") ``` ```python from datasets import load_dataset # error repo = "nateraw/ade20k-tiny" dataset = load_dataset(repo, split="train+validation", streaming=True) ``` ```sh --------------------------------------------------------------------------- ValueError Traceback (most recent call last) [<ipython-input-4-a6ae02d63899>](https://localhost:8080/#) in <module>() 3 # error 4 repo = "nateraw/ade20k-tiny" ----> 5 dataset = load_dataset(repo, split="train+validation", streaming=True) 1 frames [/usr/local/lib/python3.7/dist-packages/datasets/builder.py](https://localhost:8080/#) in as_streaming_dataset(self, split, base_path) 1030 splits_generator = splits_generators[split] 1031 else: -> 1032 raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}") 1033 1034 # Create a dataset for each of the given splits ValueError: Bad split: train+validation. Available splits: ['validation', 'train'] ``` [Colab](https://colab.research.google.com/drive/1wMj08_0bym9jnGgByib4lsBPu8NCZBG9?usp=sharing) ## Expected results load successfully or throws an error saying it is not supported. ## Actual results above ## Environment info - `datasets` version: 2.4.0 - Platform: Windows-10-10.0.22000-SP0 (windows11 x64) - Python version: 3.9.13 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4804/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4804/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4803
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4803/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4803/comments
https://api.github.com/repos/huggingface/datasets/issues/4803/events
https://github.com/huggingface/datasets/issues/4803
1,332,079,562
I_kwDODunzps5PZevK
4,803
Support `pipeline` argument in inspect.py functions
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[]
2022-08-08T16:01:24
2022-08-08T16:01:24
null
CONTRIBUTOR
null
null
null
**Is your feature request related to a problem? Please describe.** The `wikipedia` dataset requires a `pipeline` argument to build the list of splits: https://huggingface.co/datasets/wikipedia/blob/main/wikipedia.py#L937 But this is currently not supported in `get_dataset_config_info`: https://github.com/huggingface/datasets/blob/main/src/datasets/inspect.py#L373-L375 which is called by other functions, e.g. `get_dataset_split_names`. **Additional context** The dataset viewer is not working out-of-the-box on `wikipedia` for this reason: https://huggingface.co/datasets/wikipedia/viewer <img width="637" alt="Capture d’écran 2022-08-08 à 12 01 16" src="https://user-images.githubusercontent.com/1676121/183461838-5330783b-0269-4ba7-a999-314cde2023d8.png">
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4803/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4803/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4802
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4802/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4802/comments
https://api.github.com/repos/huggingface/datasets/issues/4802/events
https://github.com/huggingface/datasets/issues/4802
1,331,676,691
I_kwDODunzps5PX8YT
4,802
`with_format` behavior is inconsistent on different datasets
{ "login": "fxmarty", "id": 9808326, "node_id": "MDQ6VXNlcjk4MDgzMjY=", "avatar_url": "https://avatars.githubusercontent.com/u/9808326?v=4", "gravatar_id": "", "url": "https://api.github.com/users/fxmarty", "html_url": "https://github.com/fxmarty", "followers_url": "https://api.github.com/users/fxmarty/followers", "following_url": "https://api.github.com/users/fxmarty/following{/other_user}", "gists_url": "https://api.github.com/users/fxmarty/gists{/gist_id}", "starred_url": "https://api.github.com/users/fxmarty/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fxmarty/subscriptions", "organizations_url": "https://api.github.com/users/fxmarty/orgs", "repos_url": "https://api.github.com/users/fxmarty/repos", "events_url": "https://api.github.com/users/fxmarty/events{/privacy}", "received_events_url": "https://api.github.com/users/fxmarty/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi! You can get a `torch.Tensor` if you do the following:\r\n```python\r\nraw = load_dataset(\"beans\", split=\"train\")\r\nraw = raw.select(range(100))\r\n\r\npreprocessor = AutoFeatureExtractor.from_pretrained(\"nateraw/vit-base-beans\")\r\n\r\nfrom datasets import Array3D\r\nfeatures = raw.features.copy()\r\nfeatures[\"pixel_values\"] = datasets.Array3D(shape=(3, 224, 224), dtype=\"float32\")\r\n\r\ndef preprocess_func(examples):\r\n imgs = [img.convert(\"RGB\") for img in examples[\"image\"]]\r\n return preprocessor(imgs)\r\n\r\ndata = raw.map(preprocess_func, batched=True, features=features)\r\n\r\nprint(type(data[0][\"pixel_values\"]))\r\n\r\ndata = data.with_format(\"torch\", columns=[\"pixel_values\"])\r\n\r\nprint(type(data[0][\"pixel_values\"]))\r\n```\r\n\r\nThe reason for this \"inconsistency\" in the default case is the way PyArrow infers the type of multi-dim arrays (in this case, the `pixel_values` column). If the type is not specified manually, PyArrow assumes it is a dynamic-length sequence (it needs to know the type before writing the first batch to a cache file, and it can't be sure the array is fixed ahead of time; `ArrayXD` is our way of telling that the dims are fixed), so it already fails to convert the corresponding array to NumPy properly (you get an array of `np.object` arrays). And `with_format(\"torch\")` replaces NumPy arrays with Torch tensors, so this bad formatting propagates." ]
2022-08-08T10:41:34
2022-08-09T16:49:09
null
CONTRIBUTOR
null
null
null
## Describe the bug I found a case where `with_format` does not transform the dataset to the requested format. ## Steps to reproduce the bug Run: ```python from transformers import AutoTokenizer, AutoFeatureExtractor from datasets import load_dataset raw = load_dataset("glue", "sst2", split="train") raw = raw.select(range(100)) tokenizer = AutoTokenizer.from_pretrained("philschmid/tiny-bert-sst2-distilled") def preprocess_func(examples): return tokenizer(examples["sentence"], padding=True, max_length=256, truncation=True) data = raw.map(preprocess_func, batched=True) print(type(data[0]["input_ids"])) data = data.with_format("torch", columns=["input_ids"]) print(type(data[0]["input_ids"])) ``` printing as expected: ```python <class 'list'> <class 'torch.Tensor'> ``` Then run: ```python raw = load_dataset("beans", split="train") raw = raw.select(range(100)) preprocessor = AutoFeatureExtractor.from_pretrained("nateraw/vit-base-beans") def preprocess_func(examples): imgs = [img.convert("RGB") for img in examples["image"]] return preprocessor(imgs) data = raw.map(preprocess_func, batched=True) print(type(data[0]["pixel_values"])) data = data.with_format("torch", columns=["pixel_values"]) print(type(data[0]["pixel_values"])) ``` Printing, unexpectedly ```python <class 'list'> <class 'list'> ``` ## Expected results `with_format` should transform into the requested format; it's not the case. ## Actual results `type(data[0]["pixel_values"])` should be `torch.Tensor` in the example above ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: dev version, commit 44af3fafb527302282f6b6507b952de7435f0979 - Platform: Linux - Python version: 3.9.12 - PyArrow version: 7.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4802/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4802/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4801
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4801/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4801/comments
https://api.github.com/repos/huggingface/datasets/issues/4801/events
https://github.com/huggingface/datasets/pull/4801
1,331,337,418
PR_kwDODunzps48yTYu
4,801
Fix fine classes in trec dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-08T05:11:02
2022-08-22T16:29:14
2022-08-22T16:14:15
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4801", "html_url": "https://github.com/huggingface/datasets/pull/4801", "diff_url": "https://github.com/huggingface/datasets/pull/4801.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4801.patch", "merged_at": "2022-08-22T16:14:15" }
This PR: - replaces the fine labels, so that there are 50 instead of 47 - once more labels are added, all of them (fine and coarse) have been re-ordered, so that they align with the order in: https://cogcomp.seas.upenn.edu/Data/QA/QC/definition.html - the feature names have been fixed: `fine_label` instead of `label-fine` - to snake_case (underscores instead of hyphens) - words have been reordered Fix #4790.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4801/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4801/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4800
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4800/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4800/comments
https://api.github.com/repos/huggingface/datasets/issues/4800/events
https://github.com/huggingface/datasets/pull/4800
1,331,288,128
PR_kwDODunzps48yIss
4,800
support LargeListArray in pyarrow
{ "login": "xwwwwww", "id": 48146603, "node_id": "MDQ6VXNlcjQ4MTQ2NjAz", "avatar_url": "https://avatars.githubusercontent.com/u/48146603?v=4", "gravatar_id": "", "url": "https://api.github.com/users/xwwwwww", "html_url": "https://github.com/xwwwwww", "followers_url": "https://api.github.com/users/xwwwwww/followers", "following_url": "https://api.github.com/users/xwwwwww/following{/other_user}", "gists_url": "https://api.github.com/users/xwwwwww/gists{/gist_id}", "starred_url": "https://api.github.com/users/xwwwwww/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xwwwwww/subscriptions", "organizations_url": "https://api.github.com/users/xwwwwww/orgs", "repos_url": "https://api.github.com/users/xwwwwww/repos", "events_url": "https://api.github.com/users/xwwwwww/events{/privacy}", "received_events_url": "https://api.github.com/users/xwwwwww/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4800). All of your documentation changes will be reflected on that endpoint.", "Hi, thanks for working on this! Can you run `make style` at the repo root to fix the code quality error in CI and add a test?", "Hi, I have fixed the code quality error and added a test", "It seems that CI fails due to the lack of memory for allocating a large array, while I pass the test locally.", "Also, the current implementation of the NumPy-to-PyArrow conversion creates a lot of copies, which is not ideal for large arrays.\r\n\r\nWe can improve performance significantly if we rewrite this part:\r\nhttps://github.com/huggingface/datasets/blob/83f695c14507a3a38e9f4d84612cf49e5f50c153/src/datasets/features/features.py#L1322-L1323\r\n\r\nas\r\n```python\r\n values = pa.array(arr.ravel(), type=type) \r\n```", "@xwwwwww Feel free to ignore https://github.com/huggingface/datasets/pull/4800#issuecomment-1212280549 and revert the changes you've made to address it. \r\n\r\nWithout copying the array, this would be possible:\r\n```python\r\narr = np.array([\r\n [1, 2, 3],\r\n [4, 5, 6]\r\n])\r\n\r\ndset = Dataset.from_dict({\"data\": [arr]})\r\n\r\narr[0][0] = 100 # this change would be reflected in dset's PyArrow table -> a breaking change and also probably unexpected by the user \r\n```", "> @xwwwwww Feel free to ignore [#4800 (comment)](https://github.com/huggingface/datasets/pull/4800#issuecomment-1212280549) and revert the changes you've made to address it.\r\n> \r\n> Without copying the array, this would be possible:\r\n> \r\n> ```python\r\n> arr = np.array([\r\n> [1, 2, 3],\r\n> [4, 5, 6]\r\n> ])\r\n> \r\n> dset = Dataset.from_dict({\"data\": [arr]})\r\n> \r\n> arr[0][0] = 100 # this change would be reflected in dset's PyArrow table -> a breaking change and also probably unexpected by the user \r\n> ```\r\n\r\nOh, that makes sense.", "passed tests in ubuntu while failed in windows", "@mariosasko Hi, do you have any clue about this failure in windows?", "Perhaps we can skip the added test on Windows then.\r\n\r\nNot sure if this can help, but the ERR tool available on Windows outputs the following for the returned error code `-1073741819`:\r\n```\r\n# for decimal -1073741819 / hex 0xc0000005\r\n ISCSI_ERR_SETUP_NETWORK_NODE iscsilog.h\r\n# Failed to setup initiator portal. Error status is given in\r\n# the dump data.\r\n STATUS_ACCESS_VIOLATION ntstatus.h\r\n# The instruction at 0x%p referenced memory at 0x%p. The\r\n# memory could not be %s.\r\n USBD_STATUS_DEV_NOT_RESPONDING usb.h\r\n# as an HRESULT: Severity: FAILURE (1), FACILITY_NONE (0x0), Code 0x5\r\n# for decimal 5 / hex 0x5\r\n WINBIO_FP_TOO_FAST winbio_err.h\r\n# Move your finger more slowly on the fingerprint reader.\r\n# as an HRESULT: Severity: FAILURE (1), FACILITY_NULL (0x0), Code 0x5\r\n ERROR_ACCESS_DENIED winerror.h\r\n# Access is denied.\r\n# 5 matches found for \"-1073741819\"\r\n```", "What's the proper way to skip the added test in windows?\r\nI tried `if platform.system() == 'Linux'`, but the CI test seems stuck", "@mariosasko Hi, any idea about this :)", "Hi again! We want to skip the test on Windows but not on Linux. 
You can use this decorator to do so: \r\n```python\r\[email protected](os.name == \"nt\" and (os.getenv(\"CIRCLECI\") == \"true\" or os.getenv(\"GITHUB_ACTIONS\") == \"true\"), reason=\"The Windows CI runner does not have enough RAM to run this test\")\r\[email protected](...)\r\ndef test_large_array_xd_with_np(...):\r\n ...\r\n```", "> Hi again! We want to skip the test on Windows but not on Linux. You can use this decorator to do so:\r\n> \r\n> ```python\r\n> @pytest.mark.skipif(os.name == \"nt\" and (os.getenv(\"CIRCLECI\") == \"true\" or os.getenv(\"GITHUB_ACTIONS\") == \"true\"), reason=\"The Windows CI runner does not have enough RAM to run this test\")\r\n> @pytest.mark.parametrize(...)\r\n> def test_large_array_xd_with_np(...):\r\n> ...\r\n> ```\r\n\r\nCI on windows still stucks :(", "@mariosasko Hi, could you please take a look at this issue", "@mariosasko Hi, all checks have passed, and we are finally ready to merge this PR :)", "@lhoestq @albertvillanova Perhaps other maintainers can take a look and merge this PR :)" ]
2022-08-08T03:58:46
2022-10-20T16:34:04
null
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4800", "html_url": "https://github.com/huggingface/datasets/pull/4800", "diff_url": "https://github.com/huggingface/datasets/pull/4800.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4800.patch", "merged_at": null }
```python import numpy as np import datasets a = np.zeros((5000000, 768)) res = datasets.Dataset.from_dict({'embedding': a}) ''' File '/home/wenjiaxin/anaconda3/envs/data/lib/python3.8/site-packages/datasets/arrow_writer.py', line 178, in __arrow_array__ out = numpy_to_pyarrow_listarray(data) File "/home/wenjiaxin/anaconda3/envs/data/lib/python3.8/site-packages/datasets/features/features.py", line 1173, in numpy_to_pyarrow_listarray offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32()) File "pyarrow/array.pxi", line 312, in pyarrow.lib.array File "pyarrow/array.pxi", line 83, in pyarrow.lib._ndarray_to_array File "pyarrow/error.pxi", line 100, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Integer value 2147483904 not in range: -2147483648 to 2147483647 ''' ``` Loading a large numpy array currently raises the error above as the type of offsets is `int32`. And pyarrow has supported [LargeListArray](https://arrow.apache.org/docs/python/generated/pyarrow.LargeListArray.html) for this case.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4800/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4800/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4799
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4799/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4799/comments
https://api.github.com/repos/huggingface/datasets/issues/4799/events
https://github.com/huggingface/datasets/issues/4799
1,330,889,854
I_kwDODunzps5PU8R-
4,799
video dataset loader/parser
{ "login": "nollied", "id": 26421036, "node_id": "MDQ6VXNlcjI2NDIxMDM2", "avatar_url": "https://avatars.githubusercontent.com/u/26421036?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nollied", "html_url": "https://github.com/nollied", "followers_url": "https://api.github.com/users/nollied/followers", "following_url": "https://api.github.com/users/nollied/following{/other_user}", "gists_url": "https://api.github.com/users/nollied/gists{/gist_id}", "starred_url": "https://api.github.com/users/nollied/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nollied/subscriptions", "organizations_url": "https://api.github.com/users/nollied/orgs", "repos_url": "https://api.github.com/users/nollied/repos", "events_url": "https://api.github.com/users/nollied/events{/privacy}", "received_events_url": "https://api.github.com/users/nollied/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "Hi! We've just started discussing the video support in `datasets` (decoding backends, video feature type, etc.), so I believe we should have something tangible by the end of this year.\r\n\r\nAlso, if you have additional video features in mind that you would like to see, feel free to let us know", "Coool thanks @mariosasko " ]
2022-08-07T01:54:12
2022-08-09T16:42:51
2022-08-09T16:42:51
CONTRIBUTOR
null
null
null
you know how you can [use `load_dataset` with any arbitrary csv file](https://huggingface.co/docs/datasets/loading#csv)? and you can also [use it to load a local image dataset](https://huggingface.co/docs/datasets/image_load#local-files)? could you please add functionality to load a video dataset? it would be really cool if i could point it to a bunch of video files and use pytorch to start looping through batches of videos. like if my batch size is 16, each sample in the batch is a frame from a video. i'm competing in the [minerl challenge](https://www.aicrowd.com/challenges/neurips-2022-minerl-basalt-competition), and it would be awesome to use the HF ecosystem.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4799/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4799/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4798
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4798/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4798/comments
https://api.github.com/repos/huggingface/datasets/issues/4798/events
https://github.com/huggingface/datasets/pull/4798
1,330,699,942
PR_kwDODunzps48wVEG
4,798
Shard generator
{ "login": "marianna13", "id": 43296932, "node_id": "MDQ6VXNlcjQzMjk2OTMy", "avatar_url": "https://avatars.githubusercontent.com/u/43296932?v=4", "gravatar_id": "", "url": "https://api.github.com/users/marianna13", "html_url": "https://github.com/marianna13", "followers_url": "https://api.github.com/users/marianna13/followers", "following_url": "https://api.github.com/users/marianna13/following{/other_user}", "gists_url": "https://api.github.com/users/marianna13/gists{/gist_id}", "starred_url": "https://api.github.com/users/marianna13/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/marianna13/subscriptions", "organizations_url": "https://api.github.com/users/marianna13/orgs", "repos_url": "https://api.github.com/users/marianna13/repos", "events_url": "https://api.github.com/users/marianna13/events{/privacy}", "received_events_url": "https://api.github.com/users/marianna13/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi, thanks!\r\n\r\n> I was using Hugging Face datasets to process some very large datasets and found that it would be quite handy to have a feature that will allow to \"split\" these large datasets into chunks with equal size\r\n\r\n`map`, the method we use for processing in `datasets`, already does that if `batched=True`. And you can control the batch size with `batch_size`.\r\n\r\n> Even better - be able to run through these chunks one by one in simple and convenient way\r\n\r\nIt's not hard to do this \"manually\" with the existing API:\r\n```python\r\nbatch_size = <BATCH_SIZE>\r\nfor i in range(len(dset) // batch_size)\r\n shard = dset[i * batch_size:(i+1) * batch_size] # a dict of lists\r\n shard = Dataset.from_dict(shard)\r\n```\r\n(should be of similar performance to your implementation)\r\n\r\nStill, I think an API like that could be useful if implemented efficiently (see [this](https://discuss.huggingface.co/t/why-is-it-so-slow-to-access-data-through-iteration-with-hugginface-dataset/20385) discussion to understand what's the issue with `select`/`__getitem__` on which your implementation relies on), which can be done with `pa.Table.to_reader` in PyArrow 8.0.0+, .\r\n\r\n@lhoestq @albertvillanova wdyt? We could use such API to efficiently iterate over the batches in `map` before processing them.", "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4798). All of your documentation changes will be reflected on that endpoint.", "This is more efficient since it doesn't bring the data in memory:\r\n```python\r\nfor i in range(len(dset) // batch_size)\r\n start = i * batch_size\r\n end = min((i+1) * batch_size, len(dset))\r\n shard = dset.select(range(start, end))\r\n```\r\n\r\n@marianna13 can you give more details on when it would be handy to have this shard generator ?", "> This is more efficient since it doesn't bring the data in memory:\r\n> \r\n> ```python\r\n> for i in range(len(dset) // batch_size)\r\n> start = i * batch_size\r\n> end = min((i+1) * batch_size, len(dset))\r\n> shard = dset.select(range(start, end))\r\n> ```\r\n> \r\n> @marianna13 can you give more details on when it would be handy to have this shard generator ?\r\n\r\nSure! I used such generator when I needed to process a very large dataset (>1TB) in parallel, I've found out empirically that it's much more efficient to do that by processing only one part of the dataset with the shard generator. I tried to use a map with batching but it causesd oom errors, I tried to use the normal shard and here's what I came up with. So I thought it might be helpful to someone else!", "I see thanks ! `map` should work just fine even at this scale, feel free to open an issue if you'd like to discuss your OOM issue.\r\n\r\nRegarding `shard_generator`, since it is pretty straightforward to get shards I'm not sure we need that extra Dataset method", "Hi again! We've just added `_iter_batches(batch_size)` to the `Dataset` API for fast iteration over batches/chunks, so I think we can close this PR. Compared to this implementation, `_iter_batches` leverages `pa.Table.to_reader` for chunking, which makes it significantly faster." ]
2022-08-06T09:14:06
2022-10-03T15:35:10
2022-10-03T15:35:10
NONE
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4798", "html_url": "https://github.com/huggingface/datasets/pull/4798", "diff_url": "https://github.com/huggingface/datasets/pull/4798.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4798.patch", "merged_at": null }
Hi everyone! I was using Hugging Face datasets to process some very large datasets and found that it would be quite handy to have a feature that allows "splitting" these large datasets into chunks of equal size. Even better - being able to run through these chunks one by one in a simple and convenient way. So I decided to add a method called shard_generator() to the main Dataset class. It works similarly to the shard method but returns a generator of datasets of equal size (defined by the shard_size attribute). Example: ```python >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds Dataset({ features: ['text', 'label'], num_rows: 1066 }) >>> next(ds.shard_generator(300)) Dataset({ features: ['text', 'label'], num_rows: 300 }) ``` I hope it can be helpful to someone. Thanks!
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4798/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4798/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4797
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4797/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4797/comments
https://api.github.com/repos/huggingface/datasets/issues/4797/events
https://github.com/huggingface/datasets/pull/4797
1,330,000,998
PR_kwDODunzps48uL-t
4,797
Torgo dataset creation
{ "login": "YingLi001", "id": 75192317, "node_id": "MDQ6VXNlcjc1MTkyMzE3", "avatar_url": "https://avatars.githubusercontent.com/u/75192317?v=4", "gravatar_id": "", "url": "https://api.github.com/users/YingLi001", "html_url": "https://github.com/YingLi001", "followers_url": "https://api.github.com/users/YingLi001/followers", "following_url": "https://api.github.com/users/YingLi001/following{/other_user}", "gists_url": "https://api.github.com/users/YingLi001/gists{/gist_id}", "starred_url": "https://api.github.com/users/YingLi001/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/YingLi001/subscriptions", "organizations_url": "https://api.github.com/users/YingLi001/orgs", "repos_url": "https://api.github.com/users/YingLi001/repos", "events_url": "https://api.github.com/users/YingLi001/events{/privacy}", "received_events_url": "https://api.github.com/users/YingLi001/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi @YingLi001, thanks for your proposal to add this dataset.\r\n\r\nHowever, now we add datasets directly to the Hub (instead of our GitHub repository). You have the instructions in our docs: \r\n- [Create a dataset loading script](https://huggingface.co/docs/datasets/dataset_script)\r\n- [Create a dataset card](https://huggingface.co/docs/datasets/dataset_card)\r\n- [Share](https://huggingface.co/docs/datasets/share)\r\n\r\nFeel free to ask if you need any additional support/help." ]
2022-08-05T14:18:26
2022-08-09T18:46:00
2022-08-09T18:46:00
NONE
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4797", "html_url": "https://github.com/huggingface/datasets/pull/4797", "diff_url": "https://github.com/huggingface/datasets/pull/4797.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4797.patch", "merged_at": null }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4797/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4797/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4796
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4796/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4796/comments
https://api.github.com/repos/huggingface/datasets/issues/4796/events
https://github.com/huggingface/datasets/issues/4796
1,329,887,810
I_kwDODunzps5PRHpC
4,796
ArrowInvalid: Could not convert <PIL.Image.Image image mode=RGB when adding image to Dataset
{ "login": "NielsRogge", "id": 48327001, "node_id": "MDQ6VXNlcjQ4MzI3MDAx", "avatar_url": "https://avatars.githubusercontent.com/u/48327001?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NielsRogge", "html_url": "https://github.com/NielsRogge", "followers_url": "https://api.github.com/users/NielsRogge/followers", "following_url": "https://api.github.com/users/NielsRogge/following{/other_user}", "gists_url": "https://api.github.com/users/NielsRogge/gists{/gist_id}", "starred_url": "https://api.github.com/users/NielsRogge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NielsRogge/subscriptions", "organizations_url": "https://api.github.com/users/NielsRogge/orgs", "repos_url": "https://api.github.com/users/NielsRogge/repos", "events_url": "https://api.github.com/users/NielsRogge/events{/privacy}", "received_events_url": "https://api.github.com/users/NielsRogge/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false } ]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/10", "html_url": "https://github.com/huggingface/datasets/milestone/10", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/10/labels", "id": 9038583, "node_id": "MI_kwDODunzps4Aier3", "number": 10, "title": "3.0", "description": "Next major release", "creator": { "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }, "open_issues": 3, "closed_issues": 0, "state": "open", "created_at": "2023-02-13T16:22:42", "updated_at": "2023-04-12T17:00:57", "due_on": null, "closed_at": null }
[ "@mariosasko I'm getting a similar issue when creating a Dataset from a Pandas dataframe, like so:\r\n\r\n```\r\nfrom datasets import Dataset, Features, Image, Value\r\nimport pandas as pd\r\nimport requests\r\nimport PIL\r\n\r\n# we need to define the features ourselves\r\nfeatures = Features({\r\n 'a': Value(dtype='int32'),\r\n 'b': Image(),\r\n})\r\n\r\nurl = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\r\nimage = PIL.Image.open(requests.get(url, stream=True).raw)\r\n\r\ndf = pd.DataFrame({\"a\": [1, 2], \r\n \"b\": [image, image]})\r\n\r\ndataset = Dataset.from_pandas(df, features=features) \r\n```\r\nresults in \r\n\r\n```\r\nArrowInvalid: ('Could not convert <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x480 at 0x7F7991A15C10> with type JpegImageFile: did not recognize Python value type when inferring an Arrow data type', 'Conversion failed for column b with type object')\r\n```\r\n\r\nWill the PR linked above also fix that?", "I would expect this to work, but it doesn't. Shouldn't be too hard to fix tho (in a subsequent PR).", "Hi @mariosasko just wanted to check in if there is a PR to follow for this. I was looking to create a demo app using this. If it's not working I can just use byte encoded images in the dataset which are not displayed. ", "Hi @darraghdog! No PR yet, but I plan to fix this before the next release.", "I was just pointed here by @mariosasko, meanwhile I found a workaround using `encode_example` like so:\r\n\r\n```\r\nfrom datasets import load_from_disk, Dataset\r\nDATASET_PATH = \"/hf/m4-master/data/cm4/cm4-10000-v0.1\"\r\nds1 = load_from_disk(DATASET_PATH)\r\nds2 = Dataset.from_dict(mapping={k: [] for k in ds1[99].keys()},\r\n features=ds1.features\r\n)\r\nfor i in range(2):\r\n # could add several representative items here\r\n row = ds1[99]\r\n row_encoded = ds2.features.encode_example(row)\r\n ds2 = ds2.add_item(row_encoded)\r\n```", "Hmm, interesting. 
If I create the dataset on the fly:\r\n\r\n```\r\nfrom datasets import load_from_disk, Dataset\r\nDATASET_PATH = \"/hf/m4-master/data/cm4/cm4-10000-v0.1\"\r\nds1 = load_from_disk(DATASET_PATH)\r\nds2 = Dataset.from_dict(mapping={k: [v]*2 for k, v in ds1[99].items()},\r\n features=ds1.features)\r\n```\r\n\r\nit doesn't fail with the error in the OP, as `from_dict` performs `encode_batch`.\r\n\r\nHowever if I try to use this dataset it fails now with:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/multiprocess/pool.py\", line 125, in worker\r\n result = (True, func(*args, **kwds))\r\n File \"/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/arrow_dataset.py\", line 557, in wrapper\r\n out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs)\r\n File \"/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/arrow_dataset.py\", line 524, in wrapper\r\n out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs)\r\n File \"/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/fingerprint.py\", line 480, in wrapper\r\n out = func(self, *args, **kwargs)\r\n File \"/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/arrow_dataset.py\", line 2775, in _map_single\r\n batch = apply_function_on_filtered_inputs(\r\n File \"/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/arrow_dataset.py\", line 2655, in apply_function_on_filtered_inputs\r\n processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)\r\n File \"/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/arrow_dataset.py\", line 2347, in decorated\r\n result = f(decorated_item, *args, **kwargs)\r\n File \"debug_leak2.py\", line 235, in split_pack_and_pad\r\n images.append(image_transform(image.convert(\"RGB\")))\r\nAttributeError: 'dict' object has no attribute 'convert'\r\n\"\"\"\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"debug_leak2.py\", line 418, in <module>\r\n train_loader, val_loader = get_dataloaders()\r\n File \"debug_leak2.py\", line 348, in get_dataloaders\r\n dataset = dataset.map(mapper, batch_size=32, batched=True, remove_columns=dataset.column_names, num_proc=4)\r\n File \"/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/arrow_dataset.py\", line 2500, in map\r\n transformed_shards[index] = async_result.get()\r\n File \"/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/multiprocess/pool.py\", line 771, in get\r\n raise self._value\r\nAttributeError: 'dict' object has no attribute 'convert'\r\n```\r\n\r\nbut if I create that same dataset one item at a time as in the previous comment's code snippet it doesn't fail.\r\n\r\nThe features of this dataset are set to:\r\n\r\n```\r\n{'texts': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), \r\n'images': Sequence(feature=Image(decode=True, id=None), length=-1, id=None)}\r\n```", "> @mariosasko I'm getting a similar issue when creating a Dataset from a Pandas dataframe, like so:\r\n> \r\n> ```\r\n> from datasets import Dataset, Features, Image, Value\r\n> import pandas as pd\r\n> import requests\r\n> import PIL\r\n> \r\n> # we need to define the features ourselves\r\n> features = Features({\r\n> 'a': Value(dtype='int32'),\r\n> 'b': Image(),\r\n> })\r\n> \r\n> url = 
\"http://images.cocodataset.org/val2017/000000039769.jpg\"\r\n> image = PIL.Image.open(requests.get(url, stream=True).raw)\r\n> \r\n> df = pd.DataFrame({\"a\": [1, 2], \r\n> \"b\": [image, image]})\r\n> \r\n> dataset = Dataset.from_pandas(df, features=features) \r\n> ```\r\n> \r\n> results in\r\n> \r\n> ```\r\n> ArrowInvalid: ('Could not convert <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x480 at 0x7F7991A15C10> with type JpegImageFile: did not recognize Python value type when inferring an Arrow data type', 'Conversion failed for column b with type object')\r\n> ```\r\n> \r\n> Will the PR linked above also fix that?\r\n\r\nIt looks like the problem still exists.\r\nAny news ? Any good workaround ?\r\n\r\nThank you", "There is a workaround: \r\nCreate a loader python scrypt and upload the dataset to huggingface.\r\n\r\nHere is an example how to do that:\r\n\r\nhttps://huggingface.co/datasets/jamescalam/image-text-demo/tree/main\r\n\r\nand Here are videos with explanations:\r\n\r\nhttps://www.youtube.com/watch?v=lqK4ocAKveE and https://www.youtube.com/watch?v=ODdKC30dT8c", "cc @mariosasko gentle ping for a fix :)", "Any update on this? I'm still facing this issure. Any workaround?", "I was facing the same issue. Downgrading datasets from 2.11.0 to 2.4.0 solved the issue. ", "> Any update on this? I'm still facing this issure. Any workaround?\r\n\r\nI was able to resolve my issue with a quick workaround: \r\n\r\n```\r\nfrom collections import defaultdict\r\nfrom datasets import Dataset\r\n \r\ndata = defaultdict(list)\r\nfor idx in tqdm(range( len(dataloader)),desc=\"Captioning...\"):\r\n img = dataloader[idx]\r\n data['image'].append(img)\r\n data['text'].append(f\"{img_{idx}})\r\n \r\ndataset = Dataset.from_dict(data)\r\ndataset = dataset.filter(lambda example: example['image'] is not None)\r\ndataset = dataset.filter(lambda example: example['text'] is not None)\r\n \r\ndataset.push_to_hub(path-to-repo', private=False)\r\n```\r\n\r\nHope it helps!\r\nHappy coding", "> > Any update on this? I'm still facing this issure. Any workaround?\r\n> \r\n> I was able to resolve my issue with a quick workaround:\r\n> \r\n> ```\r\n> from collections import defaultdict\r\n> from datasets import Dataset\r\n> \r\n> data = defaultdict(list)\r\n> for idx in tqdm(range( len(dataloader)),desc=\"Captioning...\"):\r\n> img = dataloader[idx]\r\n> data['image'].append(img)\r\n> data['text'].append(f\"{img_{idx}})\r\n> \r\n> dataset = Dataset.from_dict(data)\r\n> dataset = dataset.filter(lambda example: example['image'] is not None)\r\n> dataset = dataset.filter(lambda example: example['text'] is not None)\r\n> \r\n> dataset.push_to_hub(path-to-repo', private=False)\r\n> ```\r\n> \r\n> Hope it helps! Happy coding\r\n\r\nIt works!! " ]
2022-08-05T12:41:19
2023-04-26T11:20:25
null
CONTRIBUTOR
null
null
null
## Describe the bug When adding a Pillow image to an existing Dataset on the hub, `add_item` fails due to the Pillow image not being automatically converted into the Image feature. ## Steps to reproduce the bug ```python from datasets import load_dataset from PIL import Image dataset = load_dataset("hf-internal-testing/example-documents") # load any random Pillow image image = Image.open("/content/cord_example.png").convert("RGB") new_image = {'image': image} dataset['test'] = dataset['test'].add_item(new_image) ``` ## Expected results The image should be automatically casted to the Image feature when using `add_item`. For now, this can be fixed by using `encode_example`: ``` import datasets feature = datasets.Image(decode=False) new_image = {'image': feature.encode_example(image)} dataset['test'] = dataset['test'].add_item(new_image) ``` ## Actual results ``` ArrowInvalid: Could not convert <PIL.Image.Image image mode=RGB size=576x864 at 0x7F7CCC4589D0> with type Image: did not recognize Python value type when inferring an Arrow data type ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4796/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4796/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4795
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4795/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4795/comments
https://api.github.com/repos/huggingface/datasets/issues/4795/events
https://github.com/huggingface/datasets/issues/4795
1,329,525,732
I_kwDODunzps5PPvPk
4,795
Missing MBPP splits
{ "login": "stadlerb", "id": 2452384, "node_id": "MDQ6VXNlcjI0NTIzODQ=", "avatar_url": "https://avatars.githubusercontent.com/u/2452384?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stadlerb", "html_url": "https://github.com/stadlerb", "followers_url": "https://api.github.com/users/stadlerb/followers", "following_url": "https://api.github.com/users/stadlerb/following{/other_user}", "gists_url": "https://api.github.com/users/stadlerb/gists{/gist_id}", "starred_url": "https://api.github.com/users/stadlerb/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stadlerb/subscriptions", "organizations_url": "https://api.github.com/users/stadlerb/orgs", "repos_url": "https://api.github.com/users/stadlerb/repos", "events_url": "https://api.github.com/users/stadlerb/events{/privacy}", "received_events_url": "https://api.github.com/users/stadlerb/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Thanks for reporting this as well, @stadlerb.\r\n\r\nI suggest waiting for the answer of the data owners... ", "@albertvillanova The first author of the paper responded to the upstream issue:\r\n> Task IDs 11-510 are the 500 test problems. We use 90 problems (511-600) for validation and then remaining 374 for fine-tuning (601-974). The other problems can be used as desired, either for training or few-shot prompting (although this should be specified).", "Thanks for the follow-up, @stadlerb.\r\n\r\nWould you be willing to open a Pull Request to address this issue? :wink: ", "Opened a [PR](https://github.com/huggingface/datasets/pull/4943) to implement this--lmk if you have any feedback" ]
2022-08-05T06:51:01
2022-09-13T12:27:24
2022-09-13T12:27:24
NONE
null
null
null
(@albertvillanova) The [MBPP dataset on the Hub](https://huggingface.co/datasets/mbpp) has only a test split for both its "full" and its "sanitized" subset, while the [paper](https://arxiv.org/abs/2108.07732) states in subsection 2.1 regarding the full split: > In the experiments described later in the paper, we hold out 10 problems for **few-shot prompting**, another 500 as our **test** dataset (which is used to evaluate both few-shot inference and fine-tuned models), 374 problems for **fine-tuning**, and the rest for **validation**. If the dataset on the Hub should reproduce most closely what the original authors use, I guess this four-way split should be reflected. The paper doesn't explicitly state the task_id ranges of the splits, but the [GitHub readme](https://github.com/google-research/google-research/tree/master/mbpp) referenced in the paper specifies exact task_id ranges, although it misstates the total number of samples: > We specify a train and test split to use for evaluation. Specifically: > > * Task IDs 11-510 are used for evaluation. > * Task IDs 1-10 and 511-1000 are used for training and/or prompting. We typically used 1-10 for few-shot prompting, although you can feel free to use any of the training examples. I.e. the few-shot, train and validation splits are combined into one split, with a soft suggestion of using the first ten for few-shot prompting. It is not explicitly stated whether the 374 fine-tuning samples mentioned in the paper have task_id 511 to 784 or 601 to 974 or are randomly sampled from task_id 511 to 974. Regarding the "sanitized" split the paper states the following: > For evaluations involving the edited dataset, we perform comparisons with 100 problems that appear in both the original and edited dataset, using the same held out 10 problems for few-shot prompting and 374 problems for fine-tuning. The statement doesn't appear to be very precise, as among the 10 few-shot problems, those with task_id 1, 5 and 10 are not even part of the sanitized variant, and many from the task_id range from 511 to 974 are missing (e.g. task_id 511 to 553). I suppose the idea the task_id ranges for each split remain the same, even if some of the task_ids are not present. That would result in 7 few-shot, 257 test, 141 train and 22 validation examples in the sanitized split.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4795/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4795/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4792
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4792/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4792/comments
https://api.github.com/repos/huggingface/datasets/issues/4792/events
https://github.com/huggingface/datasets/issues/4792
1,328,593,929
I_kwDODunzps5PMLwJ
4,792
Add DocVQA
{ "login": "NielsRogge", "id": 48327001, "node_id": "MDQ6VXNlcjQ4MzI3MDAx", "avatar_url": "https://avatars.githubusercontent.com/u/48327001?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NielsRogge", "html_url": "https://github.com/NielsRogge", "followers_url": "https://api.github.com/users/NielsRogge/followers", "following_url": "https://api.github.com/users/NielsRogge/following{/other_user}", "gists_url": "https://api.github.com/users/NielsRogge/gists{/gist_id}", "starred_url": "https://api.github.com/users/NielsRogge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NielsRogge/subscriptions", "organizations_url": "https://api.github.com/users/NielsRogge/orgs", "repos_url": "https://api.github.com/users/NielsRogge/repos", "events_url": "https://api.github.com/users/NielsRogge/events{/privacy}", "received_events_url": "https://api.github.com/users/NielsRogge/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
open
false
null
[]
null
[ "Thanks for proposing, @NielsRogge.\r\n\r\nPlease, note this dataset requires registering in their website and their Terms and Conditions state we cannot distribute their URL:\r\n```\r\n1. You will NOT distribute the download URLs\r\n...\r\n```" ]
2022-08-04T13:07:26
2022-08-08T05:31:20
null
CONTRIBUTOR
null
null
null
## Adding a Dataset - **Name:** DocVQA - **Description:** Document Visual Question Answering (DocVQA) seeks to inspire a “purpose-driven” point of view in Document Analysis and Recognition research, where the document content is extracted and used to respond to high-level tasks defined by the human consumers of this information. - **Paper:** https://arxiv.org/abs/2007.00398 - **Data:** https://www.docvqa.org/datasets/docvqa - **Motivation:** Models like LayoutLM and Donut in the Transformers library are fine-tuned on DocVQA. Would be very handy to directly load this dataset from the hub. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/main/ADD_NEW_DATASET.md).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4792/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4792/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4791
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4791/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4791/comments
https://api.github.com/repos/huggingface/datasets/issues/4791/events
https://github.com/huggingface/datasets/issues/4791
1,328,571,064
I_kwDODunzps5PMGK4
4,791
Dataset Viewer issue for Team-PIXEL/rendered-wikipedia-english
{ "login": "xplip", "id": 25847814, "node_id": "MDQ6VXNlcjI1ODQ3ODE0", "avatar_url": "https://avatars.githubusercontent.com/u/25847814?v=4", "gravatar_id": "", "url": "https://api.github.com/users/xplip", "html_url": "https://github.com/xplip", "followers_url": "https://api.github.com/users/xplip/followers", "following_url": "https://api.github.com/users/xplip/following{/other_user}", "gists_url": "https://api.github.com/users/xplip/gists{/gist_id}", "starred_url": "https://api.github.com/users/xplip/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xplip/subscriptions", "organizations_url": "https://api.github.com/users/xplip/orgs", "repos_url": "https://api.github.com/users/xplip/repos", "events_url": "https://api.github.com/users/xplip/events{/privacy}", "received_events_url": "https://api.github.com/users/xplip/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting. It's a known issue that should be fixed soon. Meanwhile, I had to manually trigger the dataset viewer. It's OK now.\r\nNote that the extreme aspect ratio of the images generates another issue, that we're inspecting." ]
2022-08-04T12:49:16
2022-08-04T13:43:16
2022-08-04T13:43:16
NONE
null
null
null
### Link https://huggingface.co/datasets/Team-PIXEL/rendered-wikipedia-english/viewer/rendered-wikipedia-en/train ### Description The dataset can be loaded fine but the viewer shows this error: ``` Server Error Status code: 400 Exception: Status400Error Message: The dataset does not exist. ``` I'm guessing this is because I recently renamed the dataset. Based on related issues (e.g. https://github.com/huggingface/datasets/issues/4759) , is there something server-side that needs to be refreshed? ### Owner Yes
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4791/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4791/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4790
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4790/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4790/comments
https://api.github.com/repos/huggingface/datasets/issues/4790/events
https://github.com/huggingface/datasets/issues/4790
1,328,546,904
I_kwDODunzps5PMARY
4,790
Issue with fine classes in trec dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
2022-08-04T12:28:51
2022-08-22T16:14:16
2022-08-22T16:14:16
MEMBER
null
null
null
## Describe the bug According to their paper, the TREC dataset contains 2 kinds of classes: - 6 coarse classes: TREC-6 - 50 fine classes: TREC-50 However, our implementation only has 47 (instead of 50) fine classes. The reason for this is that we only considered the last segment of the label, which is repeated for several coarse classes: - We have one `desc` fine label instead of 2: - `DESC:desc` - `HUM:desc` - We have one `other` fine label instead of 3: - `ENTY:other` - `LOC:other` - `NUM:other` From their paper: > We define a two-layered taxonomy, which represents a natural semantic classification for typical answers in the TREC task. The hierarchy contains 6 coarse classes and 50 fine classes, > Each coarse class contains a non-overlapping set of fine classes.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4790/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4790/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4789
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4789/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4789/comments
https://api.github.com/repos/huggingface/datasets/issues/4789/events
https://github.com/huggingface/datasets/pull/4789
1,328,409,253
PR_kwDODunzps48o3Kk
4,789
Update doc upload_dataset.mdx
{ "login": "mishig25", "id": 11827707, "node_id": "MDQ6VXNlcjExODI3NzA3", "avatar_url": "https://avatars.githubusercontent.com/u/11827707?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mishig25", "html_url": "https://github.com/mishig25", "followers_url": "https://api.github.com/users/mishig25/followers", "following_url": "https://api.github.com/users/mishig25/following{/other_user}", "gists_url": "https://api.github.com/users/mishig25/gists{/gist_id}", "starred_url": "https://api.github.com/users/mishig25/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mishig25/subscriptions", "organizations_url": "https://api.github.com/users/mishig25/orgs", "repos_url": "https://api.github.com/users/mishig25/repos", "events_url": "https://api.github.com/users/mishig25/events{/privacy}", "received_events_url": "https://api.github.com/users/mishig25/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-04T10:24:00
2022-09-09T16:37:10
2022-09-09T16:34:58
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4789", "html_url": "https://github.com/huggingface/datasets/pull/4789", "diff_url": "https://github.com/huggingface/datasets/pull/4789.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4789.patch", "merged_at": "2022-09-09T16:34:58" }
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4789/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4789/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4788
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4788/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4788/comments
https://api.github.com/repos/huggingface/datasets/issues/4788/events
https://github.com/huggingface/datasets/pull/4788
1,328,246,021
PR_kwDODunzps48oUNx
4,788
Fix NonMatchingChecksumError in mbpp dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Thank you for the quick response! Before noticing that you already had implemented the fix, I already had implemened my own version. I'd also suggest bumping the major version because the contents of the dataset changed, even if only slightly.\r\nI'll attach my version of the affected files: [mbpp-checksum-changes.zip](https://github.com/huggingface/datasets/files/9258161/mbpp-checksum-changes.zip).", "Hi @stadlerb, thanks for your feedback.\r\n\r\nWe normally update the major version whenever there is a new dataset release, usually with a breaking change in schema. The patch version is updated whenever there is a small correction in the dataset that does not change its schema.\r\n\r\nAs a side note for future contributions, please note that this dataset is hosted in our library GitHub repository. Therefore, the PRs to GitHub-hosted datasets needs being done through GitHub.\r\n\r\nCurrently added datasets are hosted on the Hub and for them, PRs can be done through the Hub.", "I just noticed another problem with the dataset: The [GitHub page](https://github.com/google-research/google-research/tree/master/mbpp) and the [paper](http://arxiv.org/abs/2108.07732) mention a train-test split, which is not reflected in the dataloader. I'll open a new issue regarding this later." ]
2022-08-04T08:17:40
2022-08-04T17:34:00
2022-08-04T17:21:01
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4788", "html_url": "https://github.com/huggingface/datasets/pull/4788", "diff_url": "https://github.com/huggingface/datasets/pull/4788.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4788.patch", "merged_at": "2022-08-04T17:21:01" }
Fix issue reported on the Hub: https://huggingface.co/datasets/mbpp/discussions/1 Fix #4787.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4788/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4788/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4787
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4787/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4787/comments
https://api.github.com/repos/huggingface/datasets/issues/4787/events
https://github.com/huggingface/datasets/issues/4787
1,328,243,911
I_kwDODunzps5PK2TH
4,787
NonMatchingChecksumError in mbpp dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
2022-08-04T08:15:51
2022-08-04T17:21:01
2022-08-04T17:21:01
MEMBER
null
null
null
## Describe the bug As reported on the Hub [Fix Checksum Mismatch](https://huggingface.co/datasets/mbpp/discussions/1), there is a `NonMatchingChecksumError` when loading mbpp dataset ## Steps to reproduce the bug ```python ds = load_dataset("mbpp", "full") ``` ## Expected results Loading of the dataset without any exception raised. ## Actual results ``` NonMatchingChecksumError Traceback (most recent call last) <ipython-input-1-a3fbdd3ed82e> in <module> ----> 1 ds = load_dataset("mbpp", "full") .../huggingface/datasets/src/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs) 1791 1792 # Download and prepare data -> 1793 builder_instance.download_and_prepare( 1794 download_config=download_config, 1795 download_mode=download_mode, .../huggingface/datasets/src/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 702 logger.warning("HF google storage unreachable. Downloading and preparing it from source") 703 if not downloaded_from_gcs: --> 704 self._download_and_prepare( 705 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 706 ) .../huggingface/datasets/src/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos) 1225 1226 def _download_and_prepare(self, dl_manager, verify_infos): -> 1227 super()._download_and_prepare(dl_manager, verify_infos, check_duplicate_keys=verify_infos) 1228 1229 def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable: .../huggingface/datasets/src/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 773 # Checksums verification 774 if verify_infos and dl_manager.record_checksums: --> 775 verify_checksums( 776 self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files" 777 ) .../huggingface/datasets/src/datasets/utils/info_utils.py in verify_checksums(expected_checksums, recorded_checksums, verification_name) 38 if len(bad_urls) > 0: 39 error_msg = "Checksums didn't match" + for_verification_name + ":\n" ---> 40 raise NonMatchingChecksumError(error_msg + str(bad_urls)) 41 logger.info("All the checksums matched successfully" + for_verification_name) 42 NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://raw.githubusercontent.com/google-research/google-research/master/mbpp/mbpp.jsonl'] ```
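Until the dataset metadata is updated (see the fix in #4788), a common stop-gap is to skip checksum verification when loading. This is only a sketch of a temporary workaround, assuming a `datasets` version that still accepts the `ignore_verifications` flag; it is not the recommended long-term fix:

```python
from datasets import load_dataset

# Temporary workaround: bypass the recorded checksums because the upstream file changed.
# Remove this flag once the dataset's recorded checksums are updated.
ds = load_dataset("mbpp", "full", ignore_verifications=True)
```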
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4787/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4787/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4786
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4786/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4786/comments
https://api.github.com/repos/huggingface/datasets/issues/4786/events
https://github.com/huggingface/datasets/issues/4786
1,327,340,828
I_kwDODunzps5PHZ0c
4,786
.save_to_disk('path', fs=s3) TypeError
{ "login": "h-k-dev", "id": 110547763, "node_id": "U_kgDOBpbTMw", "avatar_url": "https://avatars.githubusercontent.com/u/110547763?v=4", "gravatar_id": "", "url": "https://api.github.com/users/h-k-dev", "html_url": "https://github.com/h-k-dev", "followers_url": "https://api.github.com/users/h-k-dev/followers", "following_url": "https://api.github.com/users/h-k-dev/following{/other_user}", "gists_url": "https://api.github.com/users/h-k-dev/gists{/gist_id}", "starred_url": "https://api.github.com/users/h-k-dev/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/h-k-dev/subscriptions", "organizations_url": "https://api.github.com/users/h-k-dev/orgs", "repos_url": "https://api.github.com/users/h-k-dev/repos", "events_url": "https://api.github.com/users/h-k-dev/events{/privacy}", "received_events_url": "https://api.github.com/users/h-k-dev/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[]
2022-08-03T14:49:29
2022-08-03T15:23:00
2022-08-03T15:23:00
NONE
null
null
null
The following code: ```python import datasets from datasets import load_dataset train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"]) s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) train_dataset.save_to_disk("s3://datasets/", fs=s3) ``` produces the following traceback: ```shell File "C:\Users\Hong Knop\AppData\Local\Programs\Python\Python310\lib\site-packages\botocore\auth.py", line 374, in scope return '/'.join(scope) ``` I invoke print(scope) in <auth.py> (line 373) and find this: ```python [('4VA08VLL3VTKQJKCAI8M',), '20220803', 'us-east-1', 's3', 'aws4_request'] ```
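The printed scope contains the access key as a one-element tuple, which usually indicates the credential variable was assigned with a trailing comma. A sketch of the intended call, assuming plain string credentials (bucket path and key values are placeholders):

```python
import datasets
from datasets import load_dataset

aws_access_key_id = "<access key>"        # plain strings, no trailing comma (a trailing comma makes a tuple)
aws_secret_access_key = "<secret key>"

s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key)

train_dataset = load_dataset("imdb", split="train")
train_dataset.save_to_disk("s3://my-bucket/imdb/train", fs=s3)
```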
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4786/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4786/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4785
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4785/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4785/comments
https://api.github.com/repos/huggingface/datasets/issues/4785/events
https://github.com/huggingface/datasets/pull/4785
1,327,225,826
PR_kwDODunzps48k8y4
4,785
Require torchaudio<0.12.0 in docs
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-03T13:32:00
2022-08-03T15:07:43
2022-08-03T14:52:16
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4785", "html_url": "https://github.com/huggingface/datasets/pull/4785", "diff_url": "https://github.com/huggingface/datasets/pull/4785.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4785.patch", "merged_at": "2022-08-03T14:52:16" }
This PR adds to the docs the requirement of torchaudio<0.12.0 to avoid a RuntimeError. Follow-up to PR: - #4777
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4785/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4785/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4784
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4784/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4784/comments
https://api.github.com/repos/huggingface/datasets/issues/4784/events
https://github.com/huggingface/datasets/issues/4784
1,326,395,280
I_kwDODunzps5PDy-Q
4,784
Add Multiface dataset
{ "login": "osanseviero", "id": 7246357, "node_id": "MDQ6VXNlcjcyNDYzNTc=", "avatar_url": "https://avatars.githubusercontent.com/u/7246357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/osanseviero", "html_url": "https://github.com/osanseviero", "followers_url": "https://api.github.com/users/osanseviero/followers", "following_url": "https://api.github.com/users/osanseviero/following{/other_user}", "gists_url": "https://api.github.com/users/osanseviero/gists{/gist_id}", "starred_url": "https://api.github.com/users/osanseviero/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/osanseviero/subscriptions", "organizations_url": "https://api.github.com/users/osanseviero/orgs", "repos_url": "https://api.github.com/users/osanseviero/repos", "events_url": "https://api.github.com/users/osanseviero/events{/privacy}", "received_events_url": "https://api.github.com/users/osanseviero/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" }, { "id": 3608941089, "node_id": "LA_kwDODunzps7XHBIh", "url": "https://api.github.com/repos/huggingface/datasets/labels/vision", "name": "vision", "color": "bfdadc", "default": false, "description": "Vision datasets" } ]
open
false
null
[]
null
[ "Hi @osanseviero I would like to add this dataset.", "Hey @nandwalritik! Thanks for offering to help!\r\n\r\nThis dataset might be somewhat complex and I'm concerned about it being 65 TB, which would be quite expensive to host. @lhoestq @mariosasko I would love your input if you think it's worth adding this dataset.", "Thanks for proposing this interesting dataset, @osanseviero.\r\n\r\nPlease note that the data files are already hosted in a third-party server: e.g. the index of data files for entity \"6795937\" is at https://fb-baas-f32eacb9-8abb-11eb-b2b8-4857dd089e15.s3.amazonaws.com/MugsyDataRelease/v0.0/identities/6795937/index.html \r\n- audio.tar: https://fb-baas-f32eacb9-8abb-11eb-b2b8-4857dd089e15.s3.amazonaws.com/MugsyDataRelease/v0.0/identities/6795937/audio.tar\r\n- ...\r\n\r\nTherefore, in principle, we don't need to host them on our Hub: it would be enough to just implement a loading script in the corresponding Hub dataset repo, e.g. \"facebook/multiface\"..." ]
2022-08-02T21:00:22
2022-08-08T14:42:36
null
MEMBER
null
null
null
## Adding a Dataset - **Name:** Multiface dataset - **Description:** High-quality recordings of the faces of 13 identities, each captured in a multi-view capture stage performing various facial expressions. An average of 12,200 (v1 scripts) to 23,000 (v2 scripts) frames per subject, captured at 30 fps. - **Data:** https://github.com/facebookresearch/multiface The whole dataset is 65TB though, so I'm not sure whether hosting it is feasible. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/main/ADD_NEW_DATASET.md).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4784/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4784/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4783
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4783/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4783/comments
https://api.github.com/repos/huggingface/datasets/issues/4783/events
https://github.com/huggingface/datasets/pull/4783
1,326,375,011
PR_kwDODunzps48iHey
4,783
Docs for creating a loading script for image datasets
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "IMO it would make more sense to add a \"Create image dataset\" page with two main sections - a no-code approach with `imagefolder` + metadata (preferred way), and with a loading script (advanced). It should be clear when to choose which. If we leave this as-is, the user who jumps straight to the Vision section could be under the impression that writing a loading script is the preferred way to share a vision dataset due to how this subsection starts:\r\n```\r\nWrite a dataset loading script to share a dataset.\r\n```\r\n \r\nAlso, I think a note explaining how to make a dataset gated/disable the viewer to hide the data would be beneficial (it's pretty common to require submitting a form to access a CV dataset).", "Great suggestion @mariosasko! I added your suggestions, let me know what you think. For gated dataset access, I just added a tip referring users to the relevant docs since it's more of a Hub feature than `datasets` feature.", "Thanks, looks much better now :). I would also move the sections explaining how to create an `imagefolder` for the specific task from the [loading page](https://raw.githubusercontent.com/huggingface/datasets/main/docs/source/image_load.mdx) to this one. IMO it makes more sense to have the basic info (imagefolder structure + `load_dataset` call) there + a link to this page for info on how to create an image folder dataset.", "Good idea! Moved everything about `imagefolder` + metadata to the create an image dataset section since the `load_dataset` call is the same for different computer vision tasks. ", "Thanks for all the feedbacks! 🥰\r\n\r\nWhat do you think about creating how to share an `ImageFolder` dataset in a separate PR? I think we should create a new section under `Vision` for how to share an image dataset.", "I love it thanks ! I think moving forward we can use CSV instead of JSON Lines in the docs ;)" ]
2022-08-02T20:36:03
2022-09-09T17:08:14
2022-09-07T19:07:34
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4783", "html_url": "https://github.com/huggingface/datasets/pull/4783", "diff_url": "https://github.com/huggingface/datasets/pull/4783.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4783.patch", "merged_at": "2022-09-07T19:07:34" }
This PR is a first draft of the documentation for creating a loading script for image datasets. Feel free to let me know if there are any specifics I'm missing. 🙂 To do: - [x] Document how to create different configurations.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4783/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4783/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4782
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4782/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4782/comments
https://api.github.com/repos/huggingface/datasets/issues/4782/events
https://github.com/huggingface/datasets/issues/4782
1,326,247,158
I_kwDODunzps5PDOz2
4,782
pyarrow.lib.ArrowCapacityError: array cannot contain more than 2147483646 bytes, have 2147483648
{ "login": "conceptofmind", "id": 25208228, "node_id": "MDQ6VXNlcjI1MjA4MjI4", "avatar_url": "https://avatars.githubusercontent.com/u/25208228?v=4", "gravatar_id": "", "url": "https://api.github.com/users/conceptofmind", "html_url": "https://github.com/conceptofmind", "followers_url": "https://api.github.com/users/conceptofmind/followers", "following_url": "https://api.github.com/users/conceptofmind/following{/other_user}", "gists_url": "https://api.github.com/users/conceptofmind/gists{/gist_id}", "starred_url": "https://api.github.com/users/conceptofmind/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/conceptofmind/subscriptions", "organizations_url": "https://api.github.com/users/conceptofmind/orgs", "repos_url": "https://api.github.com/users/conceptofmind/repos", "events_url": "https://api.github.com/users/conceptofmind/events{/privacy}", "received_events_url": "https://api.github.com/users/conceptofmind/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Thanks for reporting @conceptofmind.\r\n\r\nCould you please give details about your environment? \r\n```\r\n## Environment info\r\n<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->\r\n- `datasets` version:\r\n- Platform:\r\n- Python version:\r\n- PyArrow version:\r\n```", "Hi @albertvillanova ,\r\n\r\nHere is the environment information:\r\n```\r\n- `datasets` version: 2.3.2\r\n- Platform: Linux-5.4.0-122-generic-x86_64-with-glibc2.27\r\n- Python version: 3.9.12\r\n- PyArrow version: 7.0.0\r\n- Pandas version: 1.4.2\r\n```\r\nThanks,\r\n\r\nEnrico", "I think this issue is solved here https://discuss.huggingface.co/t/minhash-deduplication/19992/12?u=loubnabnl, this only happens for very large datasets we will update it in CodeParrot code", "Hi @loubnabnl,\r\n\r\nYes, the issue is solved in the discussion thread.\r\n\r\nI will close this issue.\r\n\r\nThank you again for all of your help.\r\n\r\nEnrico", "Thanks @loubnabnl for pointing out the solution to this issue." ]
2022-08-02T18:36:05
2022-08-22T09:46:28
2022-08-20T02:11:53
NONE
null
null
null
## Describe the bug Following the example in CodeParrot, I receive an array size limitation error when deduplicating larger datasets. ## Steps to reproduce the bug ```python dataset_name = "the_pile" ds = load_dataset(dataset_name, split="train") ds = ds.map(preprocess, num_proc=num_workers) uniques = set(ds.unique("hash")) ``` Gists for minimum reproducible example: https://gist.github.com/conceptofmind/c5804428ea1bd89767815f9cd5f02d9a https://gist.github.com/conceptofmind/feafb07e236f28d79c2d4b28ffbdb6e2 ## Expected results Chunking and writing out a deduplicated dataset. ## Actual results ``` return dataset._data.column(column).unique().to_pylist() File "pyarrow/table.pxi", line 394, in pyarrow.lib.ChunkedArray.unique File "pyarrow/_compute.pyx", line 531, in pyarrow._compute.call_function File "pyarrow/_compute.pyx", line 330, in pyarrow._compute.Function.call File "pyarrow/error.pxi", line 143, in pyarrow.lib.pyarrow_internal_check_status File "pyarrow/error.pxi", line 124, in pyarrow.lib.check_status pyarrow.lib.ArrowCapacityError: array cannot contain more than 2147483646 bytes, have 2147483648 ```
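A sketch of the batched approach suggested in the linked discussion, assuming the preprocessing step (defined in the reporter's gists) adds a "hash" column: collecting the values slice by slice avoids materializing the whole column as a single Arrow array, which is what overflows the 2 GiB per-array limit.

```python
from datasets import load_dataset

ds = load_dataset("the_pile", split="train")
ds = ds.map(preprocess, num_proc=num_workers)  # preprocess / num_workers come from the linked gists

uniques = set()
batch_size = 100_000
for start in range(0, len(ds), batch_size):
    # Slicing returns plain Python lists, so no single >2 GiB Arrow array is built.
    uniques.update(ds[start : start + batch_size]["hash"])
```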
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4782/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4782/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4781
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4781/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4781/comments
https://api.github.com/repos/huggingface/datasets/issues/4781/events
https://github.com/huggingface/datasets/pull/4781
1,326,114,161
PR_kwDODunzps48hOie
4,781
Fix label renaming and add a battery of tests
{ "login": "Rocketknight1", "id": 12866554, "node_id": "MDQ6VXNlcjEyODY2NTU0", "avatar_url": "https://avatars.githubusercontent.com/u/12866554?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Rocketknight1", "html_url": "https://github.com/Rocketknight1", "followers_url": "https://api.github.com/users/Rocketknight1/followers", "following_url": "https://api.github.com/users/Rocketknight1/following{/other_user}", "gists_url": "https://api.github.com/users/Rocketknight1/gists{/gist_id}", "starred_url": "https://api.github.com/users/Rocketknight1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Rocketknight1/subscriptions", "organizations_url": "https://api.github.com/users/Rocketknight1/orgs", "repos_url": "https://api.github.com/users/Rocketknight1/repos", "events_url": "https://api.github.com/users/Rocketknight1/events{/privacy}", "received_events_url": "https://api.github.com/users/Rocketknight1/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Why don't we deprecate label renaming already instead ?", "I think it'll break a lot of workflows if we deprecate it now! There isn't really a non-deprecated workflow yet - once we've added the `auto_rename_labels` option, then we can have `prepare_tf_dataset` on the `transformers` side use that, and then we can consider setting the default option to `False`, or beginning to deprecate it somehow.", "I'm worried it's a bit of a waste of time to continue working on this behavior that shouldn't be here in the first place. Do you have a plan in mind ?", "@lhoestq Broadly! The plan is:\r\n\r\n1) Create the `auto_rename_labels` flag with this PR and skip label renaming if it isn't set. Leave it as `True` for backward compatibility.\r\n2) Add the label renaming logic to `model.prepare_tf_dataset` in `transformers`. That method calls `to_tf_dataset()` right now. Once the label renaming logic is moved there, `model.prepare_tf_dataset` will set `auto_rename_labels=False` when calling `to_tf_dataset()`, and do label renaming itself.\r\n\r\nAfter step 2, `auto_rename_labels` is now only necessary for backward compatibility when users use `to_tf_dataset` directly. I want to leave it alone for a while because the `model.prepare_tf_dataset` workflow is very new. However, once it is established, we can deprecate `auto_rename_labels` and then finally remove it from the `datasets` code and keep it in `transformers` where it belongs.", "I see ! Could it be possible to not add `auto_rename_labels` at all, since you want to remove it at the end ? Something roughly like this:\r\n1. show a warning in `to_tf_dataset` whevener a label is renamed automatically, saying that in the next major release this will be removed\r\n1. add the label renaming logic in `transformers` (to not have the warning)\r\n1. after some time, do a major release 3.0.0 and remove label renaming completely in `to_tf_dataset`\r\n\r\nWhat do you think ? cc @LysandreJik in case you have an opinion on this process.", "@lhoestq I think that plan is mostly good, but if we make the change to `datasets` first then all users will keep getting deprecation warnings until we update the method in `transformers` and release a new version. \r\n\r\nI think we can follow your plan, but make the change to `transformers` first and wait for a new release before changing `datasets` - that way there are no visible warnings or API changes for users using `prepare_tf_dataset`. It also gives us more time to update the docs and try to move people to `prepare_tf_dataset` so they aren't confused by this!", "Sounds good to me ! To summarize:\r\n1. add the label renaming logic in `transformers` + release\r\n1. show a warning in `to_tf_dataset` whevener a label is renamed automatically, saying that in the next major release this will be removed + minor release\r\n1. after some time, do a major release 3.0.0 and remove label renaming completely in `to_tf_dataset`", "Yep, that's the plan! ", "@lhoestq Are you okay with me merging this for now? ", "Can you remove `auto_rename_labels` ? I don't think it's a good idea to add it if the plan is to remove it later", "Right now, the `auto_rename_labels` behaviour happens in all cases! Making it an option is the first step in the process of disabling it (and moving the functionality to `transformers`) and then finally deprecating it." ]
2022-08-02T16:42:07
2022-09-12T11:27:06
2022-09-12T11:24:45
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4781", "html_url": "https://github.com/huggingface/datasets/pull/4781", "diff_url": "https://github.com/huggingface/datasets/pull/4781.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4781.patch", "merged_at": "2022-09-12T11:24:45" }
This PR makes some changes to label renaming in `to_tf_dataset()`, both to fix issues when users pass something unexpected and to make it easier to deprecate label renaming in future, if/when we want to move this special-casing logic to a function in `transformers`. The main changes are: - Label renaming now only happens when the `auto_rename_labels` argument is set. For backward compatibility, this defaults to `True` for now. - If the user requests "label" but the data collator renames that column to "labels", the label renaming logic will now handle that case correctly. - Added a battery of tests to make this more reliable in future. - Added an optimization to loading in `to_tf_dataset()` for unshuffled datasets (uses slicing instead of a list of indices) Fixes #4772
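A hedged usage sketch of the flag this PR describes; `auto_rename_labels` is taken from the PR text and the released signature may differ, and `tokenized_dataset` / `data_collator` are placeholders for objects prepared earlier in a typical transformers workflow:

```python
tf_ds = tokenized_dataset.to_tf_dataset(
    columns=["input_ids", "attention_mask"],
    label_cols=["label"],
    batch_size=16,
    shuffle=True,
    collate_fn=data_collator,
    auto_rename_labels=True,  # keep the legacy "label" -> "labels" renaming for backward compatibility
)
```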
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4781/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4781/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4780
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4780/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4780/comments
https://api.github.com/repos/huggingface/datasets/issues/4780/events
https://github.com/huggingface/datasets/pull/4780
1,326,034,767
PR_kwDODunzps48g9oA
4,780
Remove apache_beam import from module level in natural_questions dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-02T15:34:54
2022-08-02T16:16:33
2022-08-02T16:03:17
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4780", "html_url": "https://github.com/huggingface/datasets/pull/4780", "diff_url": "https://github.com/huggingface/datasets/pull/4780.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4780.patch", "merged_at": "2022-08-02T16:03:17" }
Instead of importing `apache_beam` at the module level, import it in the method `_build_pcollection`. Fix #4779.
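Illustrative shape of the change (not the exact diff): the heavy optional dependency is imported inside the only method that needs it, so merely loading the script, e.g. to reuse already-preprocessed data, no longer requires apache_beam.

```python
import datasets


class NaturalQuestions(datasets.BeamBasedBuilder):
    def _build_pcollection(self, pipeline, filepaths):
        import apache_beam as beam  # deferred: only needed when Beam processing actually runs
        ...
```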
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4780/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4780/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4779
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4779/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4779/comments
https://api.github.com/repos/huggingface/datasets/issues/4779/events
https://github.com/huggingface/datasets/issues/4779
1,325,997,225
I_kwDODunzps5PCRyp
4,779
Loading natural_questions requires apache_beam even with existing preprocessed data
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[]
2022-08-02T15:06:57
2022-08-02T16:03:18
2022-08-02T16:03:18
MEMBER
null
null
null
## Describe the bug When loading "natural_questions", the package "apache_beam" is required: ``` ImportError: To be able to use natural_questions, you need to install the following dependency: apache_beam. Please install it using 'pip install apache_beam' for instance' ``` This requirement is unnecessary, once there exists preprocessed data and the script just needs to download it. ## Steps to reproduce the bug ```python load_dataset("natural_questions", "dev", split="validation", revision="main") ``` ## Expected results No ImportError raised. ## Actual results ``` ImportError Traceback (most recent call last) [<ipython-input-3-c938e7c05d02>](https://localhost:8080/#) in <module>() ----> 1 from datasets import load_dataset; ds = load_dataset("natural_questions", "dev", split="validation", revision="main") [/usr/local/lib/python3.7/dist-packages/datasets/load.py](https://localhost:8080/#) in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs) 1732 revision=revision, 1733 use_auth_token=use_auth_token, -> 1734 **config_kwargs, 1735 ) 1736 [/usr/local/lib/python3.7/dist-packages/datasets/load.py](https://localhost:8080/#) in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, use_auth_token, **config_kwargs) 1504 download_mode=download_mode, 1505 data_dir=data_dir, -> 1506 data_files=data_files, 1507 ) 1508 [/usr/local/lib/python3.7/dist-packages/datasets/load.py](https://localhost:8080/#) in dataset_module_factory(path, revision, download_config, download_mode, dynamic_modules_path, data_dir, data_files, **download_kwargs) 1245 f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}" 1246 ) from None -> 1247 raise e1 from None 1248 else: 1249 raise FileNotFoundError( [/usr/local/lib/python3.7/dist-packages/datasets/load.py](https://localhost:8080/#) in dataset_module_factory(path, revision, download_config, download_mode, dynamic_modules_path, data_dir, data_files, **download_kwargs) 1180 download_config=download_config, 1181 download_mode=download_mode, -> 1182 dynamic_modules_path=dynamic_modules_path, 1183 ).get_module() 1184 elif path.count("/") == 1: # community dataset on the Hub [/usr/local/lib/python3.7/dist-packages/datasets/load.py](https://localhost:8080/#) in get_module(self) 490 base_path=hf_github_url(path=self.name, name="", revision=revision), 491 imports=imports, --> 492 download_config=self.download_config, 493 ) 494 additional_files = [(config.DATASETDICT_INFOS_FILENAME, dataset_infos_path)] if dataset_infos_path else [] [/usr/local/lib/python3.7/dist-packages/datasets/load.py](https://localhost:8080/#) in _download_additional_modules(name, base_path, imports, download_config) 214 _them_str = "them" if len(needs_to_be_installed) > 1 else "it" 215 raise ImportError( --> 216 f"To be able to use {name}, you need to install the following {_depencencies_str}: " 217 f"{', '.join(needs_to_be_installed)}.\nPlease install {_them_str} using 'pip install " 218 f"{' '.join(needs_to_be_installed.values())}' for instance'" ImportError: To be able to use natural_questions, you need to install the following dependency: apache_beam. Please install it using 'pip install apache_beam' for instance' ``` ## Environment info Colab notebook.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4779/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4779/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4778
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4778/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4778/comments
https://api.github.com/repos/huggingface/datasets/issues/4778/events
https://github.com/huggingface/datasets/pull/4778
1,324,928,750
PR_kwDODunzps48dRPh
4,778
Update local loading script docs
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4778). All of your documentation changes will be reflected on that endpoint.", "I would rather have a section in the docs that explains how to modify the script of an existing dataset (`inspect_dataset` + modification + `load_dataset`) instead of focusing on the GH datasets bundled with the source (only applicable for devs).", "Good idea! I went with @mariosasko's suggestion to use `inspect_dataset` instead of cloning a dataset repository since it's a good opportunity to show off more of the library's lesser-known functions if that's ok with everyone :)", "One advantage of cloning the repo is that it fetches potential data files referenced inside a script using relative paths, so if we decide to use `inspect_dataset`, we should at least add a tip to explain this limitation and how to circumvent it.", "Oh you're right. Calling `load_dataset` on the modified script without having the files that come with it is not ideal. I agree it should be `git clone` instead - and inspect is for inspection only ^^'" ]
2022-08-01T20:21:07
2022-08-23T16:32:26
2022-08-23T16:32:22
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4778", "html_url": "https://github.com/huggingface/datasets/pull/4778", "diff_url": "https://github.com/huggingface/datasets/pull/4778.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4778.patch", "merged_at": "2022-08-23T16:32:22" }
This PR clarifies the local loading script section to include how to load a dataset after you've modified the local loading script (closes #4732).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4778/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4778/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4777
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4777/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4777/comments
https://api.github.com/repos/huggingface/datasets/issues/4777/events
https://github.com/huggingface/datasets/pull/4777
1,324,548,784
PR_kwDODunzps48cByL
4,777
Require torchaudio<0.12.0 to avoid RuntimeError
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-01T14:50:50
2022-08-02T17:35:14
2022-08-02T17:21:39
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4777", "html_url": "https://github.com/huggingface/datasets/pull/4777", "diff_url": "https://github.com/huggingface/datasets/pull/4777.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4777.patch", "merged_at": "2022-08-02T17:21:39" }
Related to: - https://github.com/huggingface/transformers/issues/18379 Fix partially #4776.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4777/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4777/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4776
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4776/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4776/comments
https://api.github.com/repos/huggingface/datasets/issues/4776/events
https://github.com/huggingface/datasets/issues/4776
1,324,493,860
I_kwDODunzps5O8iwk
4,776
RuntimeError when using torchaudio 0.12.0 to load MP3 audio file
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Requiring torchaudio<0.12.0 isn't really a viable solution because that implies torch<0.12.0 which means no sm_86 CUDA support which means no RTX 3090 support in PyTorch.\r\n\r\nBut in my case, the error only occurs if `_fallback_load` resolves to `_fail_load` inside torchaudio 0.12.0 which is only the case if FFMPEG initialization failed: https://github.com/pytorch/audio/blob/b1f510fa5681e92ee82bdc6b2d1ed896799fc32c/torchaudio/backend/sox_io_backend.py#L36-L47\r\n\r\nThat means the proper solution for torchaudio>=0.12.0 is to check `torchaudio._extension._FFMPEG_INITIALIZED` and if it is False, then we need to remind the user to install a dynamically linked ffmpeg 4.1.8 and then maybe call `torchaudio._extension._init_ffmpeg()` to force a user-visible exception showing the missing ffmpeg dynamic library name.\r\n\r\nOn my system, installing \r\n\r\n- libavcodec.so.58 \r\n- libavdevice.so.58 \r\n- libavfilter.so.7 \r\n- libavformat.so.58 \r\n- libavutil.so.56 \r\n- libswresample.so.3 \r\n- libswscale.so.5\r\n\r\nfrom ffmpeg 4.1.8 made HF datasets 2.3.2 work just fine with torchaudio 0.12.1+cu116:\r\n\r\n```python3\r\nimport sox, torchaudio, datasets\r\nprint('torchaudio', torchaudio.__version__)\r\nprint('datasets', datasets.__version__)\r\ntorchaudio._extension._init_ffmpeg()\r\nprint(torchaudio._extension._FFMPEG_INITIALIZED)\r\nwaveform, sample_rate = torchaudio.load('/workspace/.cache/huggingface/datasets/downloads/extracted/8e5aa88585efa2a4c74c6664b576550d32b7ff9c3d1d17cc04f44f11338c3dc6/cv-corpus-8.0-2022-01-19/en/clips/common_voice_en_100038.mp3', format='mp3')\r\nprint(waveform.shape)\r\n```\r\n\r\n```\r\ntorchaudio 0.12.1+cu116\r\ndatasets 2.3.2\r\nTrue\r\ntorch.Size([1, 369792])\r\n```", "Related: https://github.com/huggingface/datasets/issues/4889", "Closing as we no longer use `torchaudio` for decoding MP3 files." ]
2022-08-01T14:11:23
2023-03-02T15:58:16
2023-03-02T15:58:15
MEMBER
null
null
null
Current version of `torchaudio` (0.12.0) raises a RuntimeError when trying to use `sox_io` backend but non-Python dependency `sox` is not installed: https://github.com/pytorch/audio/blob/2e1388401c434011e9f044b40bc8374f2ddfc414/torchaudio/backend/sox_io_backend.py#L21-L29 ```python def _fail_load( filepath: str, frame_offset: int = 0, num_frames: int = -1, normalize: bool = True, channels_first: bool = True, format: Optional[str] = None, ) -> Tuple[torch.Tensor, int]: raise RuntimeError("Failed to load audio from {}".format(filepath)) ``` Maybe we should raise a more actionable error message so that the user knows how to fix it. UPDATE: - this is an incompatibility of latest torchaudio (0.12.0) and the sox backend TODO: - [x] as a temporary solution, we should recommend installing torchaudio<0.12.0 - #4777 - #4785 - [ ] however, a stable solution must be found for torchaudio>=0.12.0 Related to: - https://github.com/huggingface/transformers/issues/18379
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4776/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4776/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4775
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4775/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4775/comments
https://api.github.com/repos/huggingface/datasets/issues/4775/events
https://github.com/huggingface/datasets/issues/4775
1,324,136,486
I_kwDODunzps5O7Lgm
4,775
Streaming not supported in Theivaprakasham/wildreceipt
{ "login": "NitishkKarra", "id": 100361173, "node_id": "U_kgDOBftj1Q", "avatar_url": "https://avatars.githubusercontent.com/u/100361173?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NitishkKarra", "html_url": "https://github.com/NitishkKarra", "followers_url": "https://api.github.com/users/NitishkKarra/followers", "following_url": "https://api.github.com/users/NitishkKarra/following{/other_user}", "gists_url": "https://api.github.com/users/NitishkKarra/gists{/gist_id}", "starred_url": "https://api.github.com/users/NitishkKarra/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NitishkKarra/subscriptions", "organizations_url": "https://api.github.com/users/NitishkKarra/orgs", "repos_url": "https://api.github.com/users/NitishkKarra/repos", "events_url": "https://api.github.com/users/NitishkKarra/events{/privacy}", "received_events_url": "https://api.github.com/users/NitishkKarra/received_events", "type": "User", "site_admin": false }
[ { "id": 3287858981, "node_id": "MDU6TGFiZWwzMjg3ODU4OTgx", "url": "https://api.github.com/repos/huggingface/datasets/labels/streaming", "name": "streaming", "color": "fef2c0", "default": false, "description": "" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting @NitishkKarra.\r\n\r\nThe root source of the issue is that streaming mode is not supported out-of-the-box for that dataset, because it contains a TAR file.\r\n\r\nWe have opened a discussion in the corresponding Hub dataset page, pointing out this issue: https://huggingface.co/datasets/Theivaprakasham/wildreceipt/discussions/1\r\n\r\nI'm closing this issue here, so this discussion is transferred there instead." ]
2022-08-01T09:46:17
2022-08-01T10:30:29
2022-08-01T10:30:29
NONE
null
null
null
### Link _No response_ ### Description _No response_ ### Owner _No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4775/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4775/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4774
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4774/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4774/comments
https://api.github.com/repos/huggingface/datasets/issues/4774/events
https://github.com/huggingface/datasets/issues/4774
1,323,375,844
I_kwDODunzps5O4Rzk
4,774
Training hangs at the end of epoch, with set_transform/with_transform+multiple workers
{ "login": "memray", "id": 4197249, "node_id": "MDQ6VXNlcjQxOTcyNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/4197249?v=4", "gravatar_id": "", "url": "https://api.github.com/users/memray", "html_url": "https://github.com/memray", "followers_url": "https://api.github.com/users/memray/followers", "following_url": "https://api.github.com/users/memray/following{/other_user}", "gists_url": "https://api.github.com/users/memray/gists{/gist_id}", "starred_url": "https://api.github.com/users/memray/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/memray/subscriptions", "organizations_url": "https://api.github.com/users/memray/orgs", "repos_url": "https://api.github.com/users/memray/repos", "events_url": "https://api.github.com/users/memray/events{/privacy}", "received_events_url": "https://api.github.com/users/memray/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[]
2022-07-31T06:32:28
2022-07-31T06:36:43
null
NONE
null
null
null
## Describe the bug I use load_dataset() (I tried with [wiki](https://huggingface.co/datasets/wikipedia) and my own json data) and use set_transform/with_transform for preprocessing. But it hangs at the end of the 1st epoch if dataloader_num_workers>=1. No problem with single worker. ## Steps to reproduce the bug ```python train_dataset = datasets.load_dataset("wikipedia", "20220301.en", split='train', cache_dir=model_args.cache_dir, streaming=False) train_dataset.set_transform(psg_parse_fn) train_dataloader = DataLoader( train_dataset, batch_size=args.train_batch_size, sampler=DistributedSampler(train_dataset), collate_fn=data_collator, drop_last=args.dataloader_drop_last, num_workers=args.dataloader_num_workers, ) ``` ## Expected results ## Actual results It simply hangs. The ending step is num_example/batch_size (one epoch). ## Environment info - `datasets` version: 2.4.1.dev0 - Platform: Linux-5.4.170+-x86_64-with-glibc2.17 - Python version: 3.8.12 - PyArrow version: 8.0.0 - Pandas version: 1.4.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4774/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4774/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4773
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4773/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4773/comments
https://api.github.com/repos/huggingface/datasets/issues/4773/events
https://github.com/huggingface/datasets/pull/4773
1,322,796,721
PR_kwDODunzps48WNV3
4,773
Document loading from relative path
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Thanks for the feedback!\r\n\r\nI agree that adding it to `load_hub.mdx` is probably a bit too specific, especially for beginners reading the tutorials. Since this clarification is closely related to loading from the Hub (the only difference being the presence/absence of a loading script), I think it makes the most sense to keep it somewhere in `loading.mdx`. What do you think about adding a Warning in Loading >>> Hugging Face Hub that explains the difference between relative/absolute paths when there is a script?", "What about updating the section about \"manual download\" ? I think it goes there no ?\r\n\r\nhttps://huggingface.co/docs/datasets/v2.4.0/en/loading#manual-download", "Updated the manual download section :)", "Thanks ! Pinging @albertvillanova to review this change, and then I think we're good to merge" ]
2022-07-29T23:32:21
2022-08-25T18:36:45
2022-08-25T18:34:23
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4773", "html_url": "https://github.com/huggingface/datasets/pull/4773", "diff_url": "https://github.com/huggingface/datasets/pull/4773.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4773.patch", "merged_at": "2022-08-25T18:34:23" }
This PR describes loading a dataset from the Hub by specifying a relative path in `data_dir` or `data_files` in `load_dataset` (see #4757).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4773/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4773/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4772
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4772/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4772/comments
https://api.github.com/repos/huggingface/datasets/issues/4772/events
https://github.com/huggingface/datasets/issues/4772
1,322,693,123
I_kwDODunzps5O1rID
4,772
AssertionError when using label_cols in to_tf_dataset
{ "login": "lehrig", "id": 9555494, "node_id": "MDQ6VXNlcjk1NTU0OTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/9555494?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lehrig", "html_url": "https://github.com/lehrig", "followers_url": "https://api.github.com/users/lehrig/followers", "following_url": "https://api.github.com/users/lehrig/following{/other_user}", "gists_url": "https://api.github.com/users/lehrig/gists{/gist_id}", "starred_url": "https://api.github.com/users/lehrig/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lehrig/subscriptions", "organizations_url": "https://api.github.com/users/lehrig/orgs", "repos_url": "https://api.github.com/users/lehrig/repos", "events_url": "https://api.github.com/users/lehrig/events{/privacy}", "received_events_url": "https://api.github.com/users/lehrig/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "cc @Rocketknight1 ", "Hi @lehrig, this is caused by the data collator renaming \"label\" to \"labels\". If you set `label_cols=[\"labels\"]` in the call it will work correctly. However, I agree that the cause of the bug is not obvious, so I'll see if I can make a PR to clarify things when the collator renames columns.", "Thanks - and wow, that appears like a strange side-effect of the data collator. Is that really needed?\r\n\r\nWhy not make it more explicit? For example, extend `DefaultDataCollator` with an optional property `label_col_name` to be used as label column; only when it is not provided default to `labels` (and document that this happens) for backwards-compatibility? ", "Haha, I honestly have no idea why our data collators rename `\"label\"` (the standard label column name in our datasets) to `\"labels\"` (the standard label column name input to our models). It's been a pain point when I design TF data pipelines, though, because I don't want to hardcode things like that - especially in `datasets`, because the renaming is something that happens purely at the `transformers` end. I don't think I could make the change in the data collators themselves at this point, because it would break backward compatibility for everything in PyTorch as well as TF.\r\n\r\nIn the most recent version of `transformers` we added a [prepare_tf_dataset](https://huggingface.co/docs/transformers/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset) method to our models which takes care of these details for you, and even chooses appropriate columns and labels for the model you're using. In future we might make that the officially recommended way to convert HF datasets to `tf.data.Dataset`.", "Interesting, that'd be great especially for clarity. https://huggingface.co/docs/datasets/use_with_tensorflow#data-loading already improved clarity, yet, all those options will still confuse people. Looking forward to those advances in the hope there'll be only 1 way in the future ;)\r\n\r\nAnyways, I am happy for the time being with the work-around you provided. Thank you!" ]
2022-07-29T21:32:12
2022-09-12T11:24:46
2022-09-12T11:24:46
NONE
null
null
null
## Describe the bug An incorrect `AssertionError` is raised when using `label_cols` in `to_tf_dataset` and the label's key name is `label`. The assertion is in this line: https://github.com/huggingface/datasets/blob/2.4.0/src/datasets/arrow_dataset.py#L475 ## Steps to reproduce the bug ```python from datasets import load_dataset from transformers import DefaultDataCollator dataset = load_dataset('glue', 'mrpc', split='train') tf_dataset = dataset.to_tf_dataset( columns=["sentence1", "sentence2", "idx"], label_cols=["label"], batch_size=16, collate_fn=DefaultDataCollator(return_tensors="tf"), ) ``` ## Expected results No assertion error. ## Actual results ``` AssertionError: in user code: File "/opt/conda/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 475, in split_features_and_labels * assert set(features.keys()).union(labels.keys()) == set(input_batch.keys()) ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.4.0 - Platform: Linux-4.18.0-305.45.1.el8_4.ppc64le-ppc64le-with-glibc2.17 - Python version: 3.8.13 - PyArrow version: 7.0.0 - Pandas version: 1.4.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4772/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4772/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4771
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4771/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4771/comments
https://api.github.com/repos/huggingface/datasets/issues/4771/events
https://github.com/huggingface/datasets/pull/4771
1,322,600,725
PR_kwDODunzps48VjWx
4,771
Remove dummy data generation docs
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-07-29T19:20:46
2022-08-03T00:04:01
2022-08-02T23:50:29
MEMBER
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4771", "html_url": "https://github.com/huggingface/datasets/pull/4771", "diff_url": "https://github.com/huggingface/datasets/pull/4771.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4771.patch", "merged_at": "2022-08-02T23:50:29" }
This PR removes instructions to generate dummy data since that is no longer necessary for datasets that are uploaded to the Hub instead of our GitHub repo. Close #4744
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4771/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4771/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4770
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4770/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4770/comments
https://api.github.com/repos/huggingface/datasets/issues/4770/events
https://github.com/huggingface/datasets/pull/4770
1,322,147,855
PR_kwDODunzps48UEBT
4,770
fix typo
{ "login": "xwwwwww", "id": 48146603, "node_id": "MDQ6VXNlcjQ4MTQ2NjAz", "avatar_url": "https://avatars.githubusercontent.com/u/48146603?v=4", "gravatar_id": "", "url": "https://api.github.com/users/xwwwwww", "html_url": "https://github.com/xwwwwww", "followers_url": "https://api.github.com/users/xwwwwww/followers", "following_url": "https://api.github.com/users/xwwwwww/following{/other_user}", "gists_url": "https://api.github.com/users/xwwwwww/gists{/gist_id}", "starred_url": "https://api.github.com/users/xwwwwww/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xwwwwww/subscriptions", "organizations_url": "https://api.github.com/users/xwwwwww/orgs", "repos_url": "https://api.github.com/users/xwwwwww/repos", "events_url": "https://api.github.com/users/xwwwwww/events{/privacy}", "received_events_url": "https://api.github.com/users/xwwwwww/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "good catch thanks ! Can you check if the same typo is also present in `add_elasticsearch_index` ? It has a very similar signature", "> good catch thanks ! Can you check if the same typo is also present in `add_elasticsearch_index` ? It has a very similar signature\r\n\r\nfixed" ]
2022-07-29T11:46:12
2022-07-29T16:02:07
2022-07-29T16:02:07
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/4770", "html_url": "https://github.com/huggingface/datasets/pull/4770", "diff_url": "https://github.com/huggingface/datasets/pull/4770.diff", "patch_url": "https://github.com/huggingface/datasets/pull/4770.patch", "merged_at": "2022-07-29T16:02:07" }
By defaul -> By default
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4770/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4770/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4769
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4769/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4769/comments
https://api.github.com/repos/huggingface/datasets/issues/4769/events
https://github.com/huggingface/datasets/issues/4769
1,322,121,554
I_kwDODunzps5OzflS
4,769
Fail to process SQuADv1.1 datasets with max_seq_length=128, doc_stride=96.
{ "login": "zhuango", "id": 5491519, "node_id": "MDQ6VXNlcjU0OTE1MTk=", "avatar_url": "https://avatars.githubusercontent.com/u/5491519?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zhuango", "html_url": "https://github.com/zhuango", "followers_url": "https://api.github.com/users/zhuango/followers", "following_url": "https://api.github.com/users/zhuango/following{/other_user}", "gists_url": "https://api.github.com/users/zhuango/gists{/gist_id}", "starred_url": "https://api.github.com/users/zhuango/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zhuango/subscriptions", "organizations_url": "https://api.github.com/users/zhuango/orgs", "repos_url": "https://api.github.com/users/zhuango/repos", "events_url": "https://api.github.com/users/zhuango/events{/privacy}", "received_events_url": "https://api.github.com/users/zhuango/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[]
2022-07-29T11:18:24
2022-07-29T11:18:24
null
NONE
null
null
null
## Describe the bug datasets fail to process SQuADv1.1 with max_seq_length=128, doc_stride=96 when calling datasets["train"].train_dataset.map(). ## Steps to reproduce the bug I used huggingface[ TF2 question-answering examples](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering). And my scripts are as follows: ``` python run_qa.py \ --model_name_or_path $BERT_DIR \ --dataset_name $SQUAD_DIR \ --do_train \ --do_eval \ --per_device_train_batch_size 12 \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 128 \ --doc_stride 96 \ --output_dir $OUTPUT \ --save_steps 10000 \ --overwrite_cache \ --overwrite_output_dir \ ``` ## Expected results Normally process SQuADv1.1 datasets with max_seq_length=128, doc_stride=96. ## Actual results ``` INFO:__main__:Padding all batches to max length because argument was set or we're on TPU. WARNING:datasets.fingerprint:Parameter 'function'=<function main.<locals>.prepare_train_features at 0x7f15bc2d07a0> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed. 0%| | 0/88 [00:00<?, ?ba/s]thread '<unnamed>' panicked at 'assertion failed: stride < max_len', /__w/tokenizers/tokenizers/tokenizers/src/tokenizer/encoding.rs:311:9 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace 0%| | 0/88 [00:00<?, ?ba/s] Traceback (most recent call last): File "run_qa.py", line 743, in <module> main() File "run_qa.py", line 485, in main load_from_cache_file=not data_args.overwrite_cache, File "/anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2394, in map desc=desc, File "/anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 551, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 518, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/anaconda3/envs/py37/lib/python3.7/site-packages/datasets/fingerprint.py", line 458, in wrapper out = func(self, *args, **kwargs) File "anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2768, in _map_single offset=offset, File "anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2644, in apply_function_on_filtered_inputs processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) File "anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2336, in decorated result = f(decorated_item, *args, **kwargs) File "run_qa.py", line 410, in prepare_train_features padding=padding, File "anaconda3/envs/py37/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 2512, in __call__ **kwargs, File "anaconda3/envs/py37/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 2703, in batch_encode_plus **kwargs, File "anaconda3/envs/py37/lib/python3.7/site-packages/transformers/tokenization_utils_fast.py", line 429, in _batch_encode_plus is_pretokenized=is_split_into_words, pyo3_runtime.PanicException: assertion failed: stride < max_len Traceback (most recent call last): File "./data/SQuADv1.1/evaluate-v1.1.py", line 92, in <module> with open(args.prediction_file) as prediction_file: FileNotFoundError: [Errno 2] No such file or directory: './output/bert_base_squadv1.1_tf2/eval_predictions.json' ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: Ubuntu, pytorch=1.11.0, tensorflow-gpu=2.9.1 - Python version: 2.7 - PyArrow version: 8.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4769/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4769/timeline
null
null
false