Datasets:
Update README.md
Browse files
README.md
CHANGED
@@ -30,11 +30,11 @@ pretty_name: arxiv_research_code
|
|
30 |
size_categories:
|
31 |
- 10B<n<100B
|
32 |
---
|
33 |
-
# Dataset Card for "ArtifactAI/arxiv_research_code"
|
34 |
|
35 |
## Dataset Description
|
36 |
|
37 |
-
https://huggingface.co/datasets/ArtifactAI/arxiv_research_code
|
38 |
|
39 |
|
40 |
### Dataset Summary
|
@@ -46,10 +46,10 @@ ArtifactAI/arxiv_research_code contains over 21.8GB of source code files referen
|
|
46 |
from datasets import load_dataset
|
47 |
|
48 |
# full dataset (21.8GB of data)
|
49 |
-
ds = load_dataset("ArtifactAI/arxiv_research_code", split="train")
|
50 |
|
51 |
# dataset streaming (will only download the data as needed)
|
52 |
-
ds = load_dataset("ArtifactAI/arxiv_research_code", streaming=True, split="train")
|
53 |
for sample in iter(ds): print(sample["code"])
|
54 |
```
|
55 |
|
@@ -87,7 +87,7 @@ The released dataset may contain sensitive information such as emails, IP addres
|
|
87 |
## Additional Information
|
88 |
|
89 |
### Dataset Curators
|
90 |
-
Matthew Kenney,
|
91 |
|
92 |
### Citation Information
|
93 |
```
|
|
|
30 |
size_categories:
|
31 |
- 10B<n<100B
|
32 |
---
|
33 |
+
# Dataset Card for "AlgorithmicResearchGroup/arxiv_research_code"
|
34 |
|
35 |
## Dataset Description
|
36 |
|
37 |
+
https://huggingface.co/datasets/AlgorithmicResearchGroup/arxiv_research_code
|
38 |
|
39 |
|
40 |
### Dataset Summary
|
|
|
46 |
from datasets import load_dataset
|
47 |
|
48 |
# full dataset (21.8GB of data)
|
49 |
+
ds = load_dataset("AlgorithmicResearchGroup/arxiv_research_code", split="train")
|
50 |
|
51 |
# dataset streaming (will only download the data as needed)
|
52 |
+
ds = load_dataset("AlgorithmicResearchGroup/arxiv_research_code", streaming=True, split="train")
|
53 |
for sample in iter(ds): print(sample["code"])
|
54 |
```
|
55 |
|
|
|
87 |
## Additional Information
|
88 |
|
89 |
### Dataset Curators
|
90 |
+
Matthew Kenney, AlgorithmicResearchGroup, matt@algorithmicresearchgroup.com
|
91 |
|
92 |
### Citation Information
|
93 |
```
|