Tasks: Text Retrieval
Modalities: Text
Sub-tasks: document-retrieval
Languages: code
Size: 100K - 1M

Commit: fix - issue

Files changed:
- README.md (+10, -10)
- repobench-r.py (+16, -16)
README.md (CHANGED)

@@ -38,26 +38,26 @@ code prediction.
 
 The dataset has 4 subsets:
 
-- `…
-- `…
-- `…
-- `…
+- `python_cff`: python dataset with `cff` setting.
+- `python_cfr`: python dataset with `cfr` setting.
+- `java_cff`: java dataset with `cff` setting.
+- `java_cfr`: java dataset with `cfr` setting.
 
 Each subset has 4 splits:
 
-- `…
-- `…
-- `…
-- `…
+- `train_easy`: training set with easy difficulty, where the number of code snippets in the context $$k$$ satisfies $$ 5 \leq k < 10 $$.
+- `train_hard`: training set with hard difficulty, where the number of code snippets in the context $$k$$ satisfies $$ k \geq 10 $$.
+- `test_easy`: testing set with easy difficulty.
+- `test_hard`: testing set with hard difficulty.
 
 ## Loading Data
 
-For example, if you want to load the `test` `…
+For example, if you want to load the `test` `cross_file_first` `python` dataset with `easy` difficulty, you can use the following code:
 
 ```python
 from datasets import load_dataset
 
-dataset = load_dataset("tianyang/repobench-r", "…
+dataset = load_dataset("tianyang/repobench-r", "python_cff", "test_easy")
 ```
 
 ## Dataset Structure
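As a quick sanity check of the renamed configs, here is a minimal loading sketch. It follows the updated README but passes the split through the explicit `split` keyword of `datasets.load_dataset` rather than positionally; `python_cff` and `test_easy` are the config and split names introduced by this commit, and recent `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets such as this one.

```python
from datasets import load_dataset

# `python_cff` is one of the four configs added by this commit;
# `test_easy` is one of its four splits. Recent `datasets` releases
# may require trust_remote_code=True for dataset scripts.
dataset = load_dataset("tianyang/repobench-r", "python_cff", split="test_easy")

print(dataset)            # number of rows and column names
print(dataset[0].keys())  # fields of a single example
```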
repobench-r.py (CHANGED)

@@ -43,10 +43,10 @@ _HOMEPAGE = "https://github.com/Leolty/repobench"
 _LICENSE = "Apache License 2.0"
 
 _URLs = {
-    "…
-    "…
-    "…
-    "…
+    "java_cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cff.gz",
+    "java_cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cfr.gz",
+    "python_cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cff.gz",
+    "python_cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cfr.gz"
 }
 
 class RepoBenchR(datasets.GeneratorBasedBuilder):

@@ -56,7 +56,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
-            name="…
+            name="python_cff",
             description=textwrap.dedent(
                 """
                 cff: cross_file_first -> mask the line that a cross-file module is first used

@@ -64,7 +64,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
             )
         ),
         datasets.BuilderConfig(
-            name="…
+            name="python_cfr",
             description=textwrap.dedent(
                 """
                 cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)

@@ -72,7 +72,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
             )
         ),
         datasets.BuilderConfig(
-            name="…
+            name="java_cff",
             description=textwrap.dedent(
                 """
                 cff: cross_file_first -> mask the line that a cross-file module is first used

@@ -80,7 +80,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
             )
         ),
         datasets.BuilderConfig(
-            name="…
+            name="java_cfr",
             description=textwrap.dedent(
                 """
                 cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)

@@ -117,20 +117,20 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
 
         return [
             datasets.SplitGenerator(
-                name=datasets.Split("…
-                gen_kwargs={"data_dir": data_dir, "split": "…
+                name=datasets.Split("train_easy"),
+                gen_kwargs={"data_dir": data_dir, "split": "train_easy"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split("train_hard"),
-                gen_kwargs={"data_dir": data_dir, "split": "…
+                gen_kwargs={"data_dir": data_dir, "split": "train_hard"},
             ),
             datasets.SplitGenerator(
-                name=datasets.Split("…
-                gen_kwargs={"data_dir": data_dir, "split": "…
+                name=datasets.Split("test_easy"),
+                gen_kwargs={"data_dir": data_dir, "split": "test_easy"},
             ),
             datasets.SplitGenerator(
-                name=datasets.Split("…
-                gen_kwargs={"data_dir": data_dir, "split": "…
+                name=datasets.Split("test_hard"),
+                gen_kwargs={"data_dir": data_dir, "split": "test_hard"},
             )
         ]
 

@@ -139,7 +139,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
         with gzip.open(data_dir, "rb") as f:
             data = pickle.load(f)
 
-        subset, level = split.split("…
+        subset, level = split.split("_")
 
         for i, example in enumerate(data[subset][level]):
             yield i, {
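The final hunk derives `(subset, level)` from the split name by splitting on `"_"`, so `train_easy` indexes the pickled payload as `data["train"]["easy"]`. A standalone sketch of that control flow, assuming only the `data[subset][level]` layout visible in the loader (the per-example fields are truncated in this diff, so the sketch yields each example unchanged):

```python
import gzip
import pickle

def iter_examples(path: str, split: str):
    """Mirror the control flow of the loader's _generate_examples."""
    with gzip.open(path, "rb") as f:
        data = pickle.load(f)          # pickled dict keyed as data[subset][level]
    subset, level = split.split("_")   # "train_easy" -> ("train", "easy")
    for i, example in enumerate(data[subset][level]):
        yield i, example               # the real loader maps fields into its schema here

# Usage: iterating "test_hard" over a local copy of python_cff.gz
# walks data["test"]["hard"].
```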