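"""Build a filtered pre-training subset from bigcode/starcoderdata.

Loads the C, C++, Python, JSON, and Java subsets, keeps files from
repositories with more than 300 stars, samples up to 2.5M rows, counts
GPT-2 tokens per sample, and exports the result to Parquet.
"""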
from datasets import load_dataset, concatenate_datasets, Value, Features
from transformers import GPT2Tokenizer


new_features = Features({
    'max_stars_repo_path': Value('string'),
    'max_stars_repo_name': Value('string'),
    'max_stars_count': Value('int64'),  # Ensure it is declared as int64
    'id': Value('string'),
    'content': Value('string')
})

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

def count_tokens(row_data):
    # Number of GPT-2 tokens in the file content; files longer than the model's
    # 1024-token context trigger a tokenizer warning, but the count is still valid.
    return {"n_tokens": len(tokenizer(row_data["content"])["input_ids"])}

# Load the subsets for the common programming languages plus JSON.
# The C and C++ subsets store max_stars_count as a float, so cast them to the
# explicit schema above to get int64.
dc = load_dataset("bigcode/starcoderdata", data_dir="c", split="train").cast(new_features)
dcpp = load_dataset("bigcode/starcoderdata", data_dir="cpp", split="train").cast(new_features)
dpython = load_dataset("bigcode/starcoderdata", data_dir="python", split="train")
djson = load_dataset("bigcode/starcoderdata", data_dir="json", split="train")
djava = load_dataset("bigcode/starcoderdata", data_dir="java", split="train")

# Concatenate the subsets, drop the metadata fields we don't need, and shuffle
seed = 42
aggregated_dataset = concatenate_datasets([dc, dpython, dcpp, djson, djava])
aggregated_dataset = aggregated_dataset.remove_columns(["id", "max_stars_repo_path", "max_stars_repo_name"])
aggregated_dataset = aggregated_dataset.shuffle(seed=seed)

# Keep only files from repositories with more than 300 stars
qualified_subset = aggregated_dataset.filter(lambda x: x["max_stars_count"] > 300, num_proc=16)

# Cap the dataset at 2.5M samples
n_sample = min(2_500_000, qualified_subset.num_rows)
target_dataset = qualified_subset.shuffle(seed=seed).select(range(n_sample))

# Add an "n_tokens" field with the per-sample token count
# (target_dataset is already a single Dataset, not a DatasetDict, so map it directly)
target_train_dataset = target_dataset.map(count_tokens, num_proc=16)
total_tokens = sum(target_train_dataset["n_tokens"])

# Save the dataset as a single Parquet file (to_parquet expects a file path, not a directory)
target_dataset_path = "/data/filtered_starcoder.parquet"
target_train_dataset.to_parquet(target_dataset_path)