tianyang committed
Commit 2189da0 · 1 Parent(s): 2c0415b

first commit

Files changed (1)
  1. repobench-p.py +140 -0
repobench-p.py ADDED
@@ -0,0 +1,140 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems"""

import gzip
import pickle
import textwrap

import datasets

_CITATION = """\
@misc{liu2023repobench,
      title={RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems},
      author={Tianyang Liu and Canwen Xu and Julian McAuley},
      year={2023},
      eprint={2306.03091},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
RepoBench is a dataset that benchmarks repository-level code auto-completion systems.

RepoBench-P denotes RepoBench for pipeline,
which is a subtask of RepoBench covering both relevant code retrieval and next-line code prediction.
"""

_HOMEPAGE = "https://github.com/Leolty/repobench"

_LICENSE = "Apache License 2.0"

_URLs = {
    "python": "https://raw.githubusercontent.com/Leolty/repobench/main/data/pipeline/python/",
    "java": "https://raw.githubusercontent.com/Leolty/repobench/main/data/pipeline/java/"
}


class RepoBenchP(datasets.GeneratorBasedBuilder):
    """RepoBench-P"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="python",
            description=textwrap.dedent(
                """
                This part of RepoBench-P is for the Python language.
                """
            )
        ),
        datasets.BuilderConfig(
            name="java",
            description=textwrap.dedent(
                """
                This part of RepoBench-P is for the Java language.
                """
            )
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "repo_name": datasets.Value("string"),
                "file_path": datasets.Value("string"),
                "context": datasets.Sequence(feature={
                    "path": datasets.Value("string"),
                    "identifier": datasets.Value("string"),
                    "snippet": datasets.Value("string"),
                    "tokenized_snippet": datasets.Sequence(datasets.Value("string"))
                }),
                "import_statement": datasets.Value("string"),
                "code": datasets.Value("string"),
                "next_line": datasets.Value("string"),
                "gold_snippet_index": datasets.Value("int32")
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the cff, cfr, and if settings."""
        config_urls = _URLs[self.config.name]
        data_dir = dl_manager.download({
            "cff": config_urls + "cross_file_first.gz",
            "cfr": config_urls + "cross_file_random.gz",
            "if": config_urls + "in_file.gz"
        })

        return [
            datasets.SplitGenerator(
                name=datasets.Split("cff"),
                gen_kwargs={"data_dir": data_dir, "split": "cff"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split("cfr"),
                gen_kwargs={"data_dir": data_dir, "split": "cfr"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split("if"),
                gen_kwargs={"data_dir": data_dir, "split": "if"}
            )
        ]

    def _generate_examples(self, data_dir, split):
        """Yields examples from the gzip-compressed pickle of the given split."""
        with gzip.open(data_dir[split], "rb") as f:
            data = pickle.load(f)

        for i, example in enumerate(data[split]):
            yield i, {
                "repo_name": example["repo_name"],
                "file_path": example["file_path"],
                "context": example["context"],
                "import_statement": example["import_statement"],
                "code": example["code"],
                "next_line": example["next_line"],
                "gold_snippet_index": example["gold_snippet_index"]
            }
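
For reference, a script like this is consumed through the standard datasets API. The sketch below is a minimal usage example, assuming the script is hosted on the Hub under an id such as tianyang/repobench-p (hypothetical here; a local path to repobench-p.py also works) and a datasets version that still supports script-based loaders.

from datasets import load_dataset

# Hub id is an assumption; adjust to wherever repobench-p.py actually lives.
# trust_remote_code=True is needed on recent `datasets` releases that still
# allow script-based datasets; older releases do not take the argument.
ds = load_dataset(
    "tianyang/repobench-p",
    "python",              # builder config: "python" or "java"
    split="cff",           # splits defined above: "cff", "cfr", "if"
    trust_remote_code=True,
)

example = ds[0]
print(example["repo_name"], example["file_path"])
print(example["next_line"])

# `context` is a Sequence of dicts, so it arrives as a dict of parallel lists;
# gold_snippet_index points at the gold cross-file snippet in the cff/cfr splits.
gold_snippet = example["context"]["snippet"][example["gold_snippet_index"]]

The three split names mirror the cross_file_first, cross_file_random, and in_file archives downloaded in _split_generators.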