---
dataset_info:
  features:
  - name: source
    dtype: string
  - name: task_type
    dtype: string
  - name: in_source_id
    dtype: string
  - name: problem
    dtype: string
  - name: gold_standard_solution
    dtype: string
  - name: problem_id
    dtype: string
  - name: metadata
    struct:
    - name: difficulty
      dtype: string
    - name: memory_limit
      dtype: string
    - name: memory_limit_bytes
      dtype: int64
    - name: problem_url
      dtype: string
    - name: time_limit
      dtype: string
  - name: verification_info
    struct:
    - name: language
      dtype: string
    - name: test_cases
      list:
      - name: fn_name
        dtype: string
      - name: input
        dtype: string
      - name: output
        dtype: string
      - name: type
        dtype: string
  splits:
  - name: train
    num_bytes: 385930754
    num_examples: 27839
  download_size: 235246000
  dataset_size: 385930754
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
import datasets

# Maximum number of test cases an example may carry to be kept.
MAX_TEST_CASES = 6


def has_at_most_six_test_cases(example):
    """Return True when the example's verification_info lists <= 6 test cases."""
    return len(example["verification_info"]["test_cases"]) <= MAX_TEST_CASES


# Load the source split, keep only the small-test-case examples, and
# publish the filtered subset as a new Hub dataset.
ds = datasets.load_dataset(
    "open-r1/verifiable-coding-problems-python_decontaminated", split="train"
)
ds_tc_6 = ds.filter(has_at_most_six_test_cases)
ds_tc_6.push_to_hub(
    "rasdani/verifiable-coding-problems-python_decontaminated_six_test_cases"
)
Before: 27839 examples (full `train` split, per `num_examples` above).
After: only examples with at most 6 test cases remain (filtered count not recorded here — TODO fill in).