Create generate_from_starcoder.py
generate_from_starcoder.py
ADDED
from datasets import load_dataset, concatenate_datasets, Value, Features
from transformers import GPT2Tokenizer

# Declare the schema explicitly so every language subset exposes identical features
new_features = Features({
    'max_stars_repo_path': Value('string'),
    'max_stars_repo_name': Value('string'),
    'max_stars_count': Value('int64'),  # ensure star counts are int64, not float
    'id': Value('string'),
    'content': Value('string')
})

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

def count_tokens(row_data):
    return {"n_tokens": len(tokenizer(row_data["content"])["input_ids"])}
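
# Optional sketch (not in the original script): at millions of rows, per-row
# tokenization is slow; a batched variant used with map(..., batched=True)
# is usually much faster.
def count_tokens_batched(batch):
    return {"n_tokens": [len(ids) for ids in tokenizer(batch["content"])["input_ids"]]}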

# Load subsets for common programming languages plus JSON
dc = load_dataset("bigcode/starcoderdata", data_dir="c", split="train").cast(new_features)      # max_stars_count is float here; cast to int64
dcpp = load_dataset("bigcode/starcoderdata", data_dir="cpp", split="train").cast(new_features)  # same cast so features match across subsets
dpython = load_dataset("bigcode/starcoderdata", data_dir="python", split="train")
djson = load_dataset("bigcode/starcoderdata", data_dir="json", split="train")
djava = load_dataset("bigcode/starcoderdata", data_dir="java", split="train")
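
# Optional guard (added suggestion): concatenate_datasets requires identical
# features across all subsets, which is why the C and C++ star counts are cast above.
assert all(d.features == dpython.features for d in (dc, dcpp, djson, djava))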

# Concatenate the subsets, drop the metadata fields we don't need, and shuffle
seed = 42
aggregated_dataset = concatenate_datasets([dc, dpython, dcpp, djson, djava])
aggregated_dataset = aggregated_dataset.remove_columns(["id", "max_stars_repo_path", "max_stars_repo_name"])
aggregated_dataset = aggregated_dataset.shuffle(seed=seed)

# Keep only files from repositories with more than 300 stars
qualified_subset = aggregated_dataset.filter(lambda x: x["max_stars_count"] > 300, num_proc=16)
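
# Note (assumption, worth verifying): if max_stars_count can be null for unstarred
# repos, the comparison above raises on None; a defensive predicate would be
# lambda x: (x["max_stars_count"] or 0) > 300.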

# Reduce the size to at most 2.5M samples
n_sample = min(2_500_000, qualified_subset.num_rows)
target_dataset = qualified_subset.shuffle(seed=seed).select(range(n_sample))

# Add an "n_tokens" field; target_dataset is a Dataset (not a DatasetDict), so map it directly
target_train_dataset = target_dataset.map(count_tokens, num_proc=16)
total_tokens = sum(target_train_dataset["n_tokens"])
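
# Report corpus size (added for visibility; the original computes total_tokens but never uses it)
print(f"kept {target_train_dataset.num_rows:,} files, {total_tokens:,} GPT-2 tokens")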

# Save the dataset as a single parquet file (to_parquet expects a file path, not a directory)
target_dataset_path = "/data/filtered_starcoder"
target_train_dataset.to_parquet(target_dataset_path)
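
# Optional round-trip check (added suggestion): reload the parquet file through the
# datasets library and confirm the row count survived the export.
reloaded = load_dataset("parquet", data_files=target_dataset_path, split="train")
assert reloaded.num_rows == target_train_dataset.num_rows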