from datasets import load_dataset, concatenate_datasets, Value, Features
from transformers import GPT2Tokenizer

# Declare max_stars_count as int64 so all language subsets share one schema.
new_features = Features({
    'max_stars_repo_path': Value('string'),
    'max_stars_repo_name': Value('string'),
    'max_stars_count': Value('int64'),
    'id': Value('string'),
    'content': Value('string')
})

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

def count_tokens(row_data):
    return {"n_tokens": len(tokenizer(row_data["content"])["input_ids"])}

# Load subsets for common programming languages and JSON.
# The C and C++ subsets need the cast because max_stars_count is stored as float there.
dc = load_dataset("bigcode/starcoderdata", data_dir="c", split="train").cast(new_features)
dcpp = load_dataset("bigcode/starcoderdata", data_dir="cpp", split="train").cast(new_features)
dpython = load_dataset("bigcode/starcoderdata", data_dir="python", split="train")
djson = load_dataset("bigcode/starcoderdata", data_dir="json", split="train")
djava = load_dataset("bigcode/starcoderdata", data_dir="java", split="train")

seed = 42

# Merge the subsets, drop the fields we don't need, and shuffle.
aggregated_dataset = concatenate_datasets([dc, dpython, dcpp, djson, djava])
aggregated_dataset = aggregated_dataset.remove_columns(["id", "max_stars_repo_path", "max_stars_repo_name"])
aggregated_dataset = aggregated_dataset.shuffle(seed=seed)

# Keep only files from repositories with more than 300 stars.
qualified_subset = aggregated_dataset.filter(lambda x: x["max_stars_count"] > 300, num_proc=16)

# Reduce the size to at most 2.5M samples.
n_sample = min(2_500_000, qualified_subset.num_rows)
target_dataset = qualified_subset.shuffle(seed=seed).select(range(n_sample))

# Add an "n_tokens" field with the GPT-2 token count of each file.
# (target_dataset is already a single split, so there is no ['train'] key to index.)
target_train_dataset = target_dataset.map(count_tokens, num_proc=16)
total_tokens = sum(target_train_dataset["n_tokens"])

# Save the dataset as a Parquet file.
target_dataset_path = "/data/filtered_starcoder"
target_train_dataset.to_parquet(target_dataset_path)
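
# Optional sanity check (a sketch, not part of the original pipeline): reload the
# saved Parquet file with the generic "parquet" loader to confirm the schema and
# the total token count before using it for training.
reloaded = load_dataset("parquet", data_files=target_dataset_path, split="train")
print(reloaded.features)
print(sum(reloaded["n_tokens"]))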