# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the CodeClippy team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CodeClippy dataset: open-source code scraped from GitHub on July 7, 2021.
"""

import io
from typing import List

import jsonlines
import requests
import zstandard as zstd
from bs4 import BeautifulSoup

import datasets


_CITATION = """\
@misc{cooper-2021-code-clippy-data,
    author       = {Nathan Cooper, Artashes Arutiunian, Santiago Hincapié-Potes, Ben Trevett, Arun Raja, Erfan Hossami, Mrinal Mathur, and contributors},
    title        = {{Code Clippy Data: A large dataset of code data from Github for research into code language models}},
    month        = jul,
    year         = 2021,
    version      = {1.0},
    publisher    = {GitHub},
    url          = {https://github.com/ncoop57/gpt-code-clippy}
}
"""

_DESCRIPTION = """
This dataset was generated by selecting GitHub repositories from a large collection of repositories. These repositories were collected from https://seart-ghs.si.usi.ch/ and the GitHub portion of [The Pile](https://github.com/EleutherAI/github-downloader) (scraped on July 7th, 2021). The goal of this dataset is to provide a training set for pretraining large language models on code, helping software engineering researchers better understand the impact of such models on software-related tasks such as code autocompletion. The dataset is split into train, validation, and test sets. There is a version containing duplicates (209GB compressed) and one with exact duplicates removed (132GB compressed). It contains mostly JavaScript and Python code, but other programming languages are present to varying degrees.
"""

_HOMEPAGE = "https://github.com/CodedotAl/gpt-code-clippy/wiki"

_LICENSE = "GPL-3.0"

dataset_names = ["code_clippy_dedup_data", "code_clippy_dup_data"]
splits = ["train", "validation", "test"]

BASE_URL = "https://the-eye.eu/public/AI/training_data/code_clippy_data/"
_URLs = {}
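# NOTE: the index of data files below is built by scraping the server's
# directory listings at import time, so loading this script requires
# network access to the-eye.eu.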
for dataset in dataset_names:
    _URLs[dataset] = {}
    for split in splits:
        _URLs[dataset][split] = []
        url = BASE_URL + dataset + "/" + split
        r = requests.get(url)
        soup = BeautifulSoup(r.content, "html.parser")
        results = soup.find("pre")
        url_elements = results.find_all("a")
        for url_element in url_elements:
            if url_element.text == "../":
                continue
            _URLs[dataset][split].append(f"{url}/{url_element.text}")

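# The resulting _URLs maps config name -> split -> list of file URLs, e.g.
# (illustrative file name only; the actual names on the server may differ):
#   _URLs["code_clippy_dedup_data"]["train"][0] ==
#       BASE_URL + "code_clippy_dedup_data/train/data_0000.jsonl.zst"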

class CodeClippy(datasets.GeneratorBasedBuilder):
    """CodeClippy dataset - opensource code from Github. Scrapped July 7 2021."""

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="code_clippy_dedup_data",
            version=VERSION,
            description="Contains a deduplicated version of files scraped from GitHub including non-code related files such as txts.",
        ),
        datasets.BuilderConfig(
            name="code_clippy_dup_data",
            version=VERSION,
            description="Contains files scraped from GitHub including non-code related files such as txts. This version has duplicates.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "code_clippy_dedup_data"

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("int64"),
                "text": datasets.Value("string"),
                "repo_name": datasets.Value("string"),
                "stars": datasets.Value("string"),
                "repo_language": datasets.Value("string"),
                "file_name": datasets.Value("string"),
                "mime_type": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE)

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        # Download all of the zstandard-compressed jsonlines files
        downloaded_files = dl_manager.download(_URLs[self.config.name])
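        # `dl_manager.download` preserves the nested structure of its input,
        # so `downloaded_files` maps each split name to a list of local paths.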
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": downloaded_files["test"]}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": downloaded_files["validation"]}
            ),
        ]

    def _generate_examples(self, filepaths: List):
        """Yields examples as (key, example) tuples."""
        id_ = 0
        dctx = zstd.ZstdDecompressor()
        # Loop through each filepath and read each line in the jsonlines file as an example
        for filepath in filepaths:
            with open(filepath, "rb") as f:
                f = dctx.stream_reader(f)
                f = io.TextIOWrapper(f, encoding="utf-8")
                f = jsonlines.Reader(f)
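                # Each record is assumed to look like the following
                # (illustrative sketch, matching the features declared
                # in _info above):
                #   {"text": "<file contents>",
                #    "meta": {"repo_name": ..., "stars": ..., "repo_language": ...,
                #             "file_name": ..., "mime_type": ...}}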
                for line in f:
                    yield id_, {"id": id_, "text": line["text"], **line["meta"]}
                    id_ += 1
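

if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the original
    # script): assumes this file is saved locally as a dataset loading script
    # and that the hosting URLs above are still reachable. Depending on your
    # `datasets` version, loading a local script may require an older release
    # or `trust_remote_code=True`. Streaming avoids downloading the full
    # ~132GB split up front.
    from datasets import load_dataset

    ds = load_dataset(__file__, name="code_clippy_dedup_data", split="validation", streaming=True)
    first = next(iter(ds))
    print(first["repo_name"], first["file_name"], first["mime_type"])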