Datasets:

Languages:
code
ArXiv:
License:
ncoop57 committed on
Commit
4ba9fe8
1 Parent(s): 3a52ae0

Add code clippy dataset loader

Browse files
Files changed (1) hide show
  1. code_clippy.py +132 -0
code_clippy.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the CodeClippy team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ CodeClippy dataset - opensource code from Github. Scraped July 7 2021.
17
+ More to add here.
18
+ """
19
+
20
+ import io
21
+ import json
22
+ from typing import List
23
+
24
+ import jsonlines
25
+ import zstandard as zstd
26
+
27
+ import datasets
28
+
29
+ import requests
30
+ from bs4 import BeautifulSoup
31
+
32
+
33
+ _CITATION = """\
34
+ @misc{cooper-2021-code-clippy-data,
35
+ author = {Nathan Coooper, Artashes Arutiunian, Santiago Hincapié-Potes, Ben Trevett, Arun Raja, Erfan Hossami, Mrinal Mathur, and contributors},
36
+ title = {{Code Clippy Data: A large dataset of code data from Github for research into code language models}},
37
+ month = jul,
38
+ year = 2021,
39
+ version = {1.0},
40
+ publisher = {GitHub},
41
+ url = {https://github.com/ncoop57/gpt-code-clippy}
42
+ }
43
+ """
44
+
45
+ _DESCRIPTION = """
46
+ This dataset was generated by selecting GitHub repositories from a large collection of repositories. These repositories were collected from https://seart-ghs.si.usi.ch/ and Github portion of [The Pile](https://github.com/EleutherAI/github-downloader) (performed on July 7th, 2021). The goal of this dataset is to provide a training set for pretraining large language models on code data for helping software engineering researchers better understand their impacts on software related tasks such as autocompletion of code. The dataset is split into train, validation, and test splits. There is a version containing duplicates (209GBs compressed) and ones where exact duplicates (132GBs compressed) are removed. Contains mostly JavaScript and Python code, but other programming languages are included as well to various degrees.
47
+ """
48
+
49
+ _HOMEPAGE = "https://github.com/CodedotAl/gpt-code-clippy/wiki"
50
+
51
+ _LICENSE = "GPL-3.0"
52
+
53
+ datasets = ["code_clippy_dedup_data", "code_clippy_dup_data"]
54
+ splits = ["train", "validation", "test"]
55
+
56
+ BASE_URL = "https://the-eye.eu/public/AI/training_data/code_clippy_data/"
57
+ _URLs = {}
58
+ for dataset in datasets:
59
+ _URLs[dataset] = {}
60
+ for split in splits:
61
+ _URLs[dataset][split] = []
62
+ url = BASE_URL + dataset + "/" + split
63
+ r = requests.get(url)
64
+ soup = BeautifulSoup(r.content, "html.parser")
65
+ results = soup.find("pre")
66
+ url_elements = results.find_all("a")
67
+ for url_element in url_elements:
68
+ if url_element.text == "../":
69
+ continue
70
+ _URLs[dataset][split].append(f"{BASE_URL}/{dataset}/{split}/{url_element.text}")
71
+
72
+
73
class CodeClippy(datasets.GeneratorBasedBuilder):
    """CodeClippy dataset - opensource code from Github. Scraped July 7 2021."""

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="code_clippy_dedup_data",
            version=VERSION,
            description="Contains a deduplicated version of files scraped from GitHub including non-code related files such as txts.",
        ),
        datasets.BuilderConfig(
            name="code_clippy_dup_data",
            version=VERSION,
            description="Contains files scraped from GitHub including non-code related files such as txts. This version has duplicates.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "code_clippy_dedup_data"

    def _info(self):
        """Declare the schema shared by every example in this dataset."""
        # "id" is a running integer assigned during generation; all remaining
        # columns are string fields copied from the scraped jsonlines records.
        string_fields = ("text", "repo_name", "stars", "repo_language", "file_name", "mime_type")
        schema = {"id": datasets.Value("int64")}
        for field in string_fields:
            schema[field] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(schema),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download every zst-compressed jsonlines shard for the active config.
        downloaded = dl_manager.download(_URLs[self.config.name])
        split_order = (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "validation"),
        )
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": downloaded[key]})
            for split, key in split_order
        ]

    def _generate_examples(self, filepaths: List):
        """Yields examples as (key, example) tuples."""
        example_id = 0
        decompressor = zstd.ZstdDecompressor()
        for path in filepaths:
            # Each shard is a zstandard-compressed jsonlines file; stream it
            # through a UTF-8 text wrapper instead of loading it whole.
            with open(path, "rb") as raw:
                text_stream = io.TextIOWrapper(decompressor.stream_reader(raw), encoding="utf-8")
                for record in jsonlines.Reader(text_stream):
                    # Flatten the nested "meta" dict into the example fields.
                    yield example_id, {"id": example_id, "text": record["text"], **record["meta"]}
                    example_id += 1