Upload folder using huggingface_hub
Browse files- .gitattributes +3 -0
- README.md +38 -0
- code_knowledge_eval.py +83 -0
- test.json +3 -0
- train.json +3 -0
- validation.json +3 -0
.gitattributes
CHANGED
@@ -56,3 +56,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
56 |
# Video files - compressed
|
57 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
58 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
56 |
# Video files - compressed
|
57 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
58 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
59 |
+
test.json filter=lfs diff=lfs merge=lfs -text
|
60 |
+
train.json filter=lfs diff=lfs merge=lfs -text
|
61 |
+
validation.json filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
configs:
|
3 |
+
- config_name: default
|
4 |
+
data_files:
|
5 |
+
- split: train
|
6 |
+
path: "train.json"
|
7 |
+
- split: validation
|
8 |
+
path: "validation.json"
|
9 |
+
- split: test
|
10 |
+
path: "test.json"
|
11 |
+
---
|
12 |
+
|
13 |
+
|
14 |
+
# Code Knowledge Value Evaluation Dataset
|
15 |
+
|
16 |
+
This dataset is created by evaluating the knowledge value of code sourced from the `bigcode/the-stack` repository. It is designed to assess the educational and knowledge potential of different code samples.
|
17 |
+
|
18 |
+
## Dataset Overview
|
19 |
+
|
20 |
+
The dataset is split into training, validation, and test sets with the following number of samples:
|
21 |
+
|
22 |
+
- **Training set**: 22,786 samples
|
23 |
+
- **Validation set**: 4,555 samples
|
24 |
+
- **Test set**: 18,232 samples
|
25 |
+
|
26 |
+
## Usage
|
27 |
+
|
28 |
+
This dataset can be used to train and evaluate models that assess the knowledge value of code, potentially aiding in tasks such as automated code review, educational feedback, and curriculum development.
|
29 |
+
|
30 |
+
## Source
|
31 |
+
|
32 |
+
The dataset is based on the code from [`bigcode/the-stack`](https://huggingface.co/datasets/bigcode/the-stack), which provides a comprehensive collection of open-source code across various programming languages.
|
33 |
+
|
34 |
+
## License
|
35 |
+
|
36 |
+
Please ensure that you comply with the license of the original `bigcode/the-stack` repository when using this dataset.
|
37 |
+
|
38 |
+
|
code_knowledge_eval.py
ADDED
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import glob
|
3 |
+
import json
|
4 |
+
|
5 |
+
import datasets
|
6 |
+
|
7 |
+
# Dataset version; the second argument is the (empty) version description string.
_VERSION = datasets.Version("1.0.0", "")

# Homepage URL shown in the dataset card (none provided).
_URL = ""

# Citation block surfaced via DatasetInfo.citation.
_CITATION = """\
There is no citation information
"""

# Free-text description surfaced via DatasetInfo.description.
_DESCRIPTION = """\
simple code knowledge eval dataset loading script
"""

# Relative paths of the JSON split files shipped alongside this script
# (resolved by the `datasets` library from the repository root).
TRAIN_FILE = "train.json"
VALIDATION_FILE = "validation.json"
TEST_FILE = "test.json"
|
22 |
+
|
23 |
+
|
24 |
+
def generator(fpath):
    """Yield one example dict per record in the JSON file at *fpath*.

    The file must contain a single JSON array of objects, each having the
    keys ``content``, ``score``, ``lang``, ``repo_name``, ``repo_path`` and
    ``repo_licenses``. The ``score`` value is converted to ``str`` so it can
    be encoded as a ``ClassLabel`` by the builder in this module.

    Args:
        fpath: Path to a JSON file containing a list of example records.

    Yields:
        dict: One example per input record, with ``score`` renamed to
        ``label`` (stringified) and all other fields passed through.

    Raises:
        KeyError: If a record is missing one of the expected keys.
    """
    # Fix: force UTF-8 — JSON files are UTF-8 by convention, but the bare
    # open() used the platform default encoding (e.g. cp1252 on Windows),
    # which can corrupt or fail on non-ASCII code content.
    with open(fpath, "r", encoding="utf-8") as f:
        in_json = json.load(f)
    for item in in_json:
        yield {
            "content": item["content"],
            "label": str(item["score"]),
            "lang": item["lang"],
            "repo_name": item["repo_name"],
            "repo_path": item["repo_path"],
            "repo_licenses": item["repo_licenses"],
        }
|
36 |
+
|
37 |
+
class CodeKnowledgeEvalDataset(datasets.GeneratorBasedBuilder):
    """Builder for the code knowledge evaluation dataset.

    Loads the three local JSON split files (train/validation/test) and
    exposes each record with a string ``content`` field and a six-way
    ``label`` class (stringified scores '0'..'5').
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=_VERSION,
            description="Code Knowledge Evaluation Dataset",
        )
    ]

    def _info(self):
        """Return the dataset metadata (schema, description, citation)."""
        # Schema of a single example; "label" is the stringified 0-5 score
        # produced by generator() above.
        feature_spec = datasets.Features(
            {
                "content": datasets.Value("string"),
                "label": datasets.ClassLabel(num_classes=6, names=['0', '1', '2', '3', '4', '5']),
                "lang": datasets.Value("string"),
                "repo_name": datasets.Value("string"),
                "repo_path": datasets.Value("string"),
                "repo_licenses": datasets.Sequence(feature=datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=feature_spec,
            supervised_keys=None,  # Probably needs to be fixed.
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Map each split to its local JSON file; nothing is downloaded."""
        split_files = (
            (datasets.Split.TRAIN, TRAIN_FILE),
            (datasets.Split.VALIDATION, VALIDATION_FILE),
            (datasets.Split.TEST, TEST_FILE),
        )
        split_generators = []
        for split_name, json_file in split_files:
            split_generators.append(
                datasets.SplitGenerator(name=split_name, gen_kwargs={'fpath': json_file})
            )
        return split_generators

    def _generate_examples(self, fpath):
        """Yields examples."""
        # Keys are sequential integers, identical to the enumerate() the
        # original used.
        key = 0
        for example in generator(fpath):
            yield key, example
            key += 1
|
test.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fd6c302c6565f97d29aa4961158a29a4e2ec2106b62dcea21b4e42c08060581b
|
3 |
+
size 135086018
|
train.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:72897a33a1fda8c7fa3df994c1d39f721edadc7399a155b0fc70e89faee50f87
|
3 |
+
size 174395862
|
validation.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:29cedff69879f73d68a137be15a23e4d317a75b6e0bd1f5430d18c4559d9c2cc
|
3 |
+
size 33121774
|