|
|
|
from crag_sampler import CragSampler |
|
import json |
|
import os |
|
|
|
|
|
def run_crag_task_1_and_2(
    file_path: str,
    fields_to_extract: list[str] | None = None,
    n_subsamples: int = 5,
    output_dir: str | None = None,
    compress: bool = True,
    n_processes: int | None = None,
    overwrite: bool = False,
) -> None:
    """Run the CRAG sampling pipeline for tasks 1 and 2.

    Creates stratified subsamples of the input JSONL file, prints summary
    statistics, and writes the subsample files to ``output_dir``. A metadata
    file ``<input_stem>_subsamples.json`` is written next to the input file.

    Args:
        file_path: Path to input JSONL file (may be bz2-compressed).
        fields_to_extract: List of fields to extract from JSONL; ``None``
            lets the sampler choose its defaults.
        n_subsamples: Number of subsamples to create.
        output_dir: Directory for output files; ``None`` uses the sampler's
            default location.
        compress: Whether to compress output files.
        n_processes: Number of processes for parallel processing; ``None``
            lets the sampler decide.
        overwrite: Whether to overwrite existing files.
    """
    sampler = CragSampler(
        input_file=file_path, required_fields=fields_to_extract, use_cache=True
    )

    # Subsample metadata is written alongside the input file:
    # <dir>/<input_stem>_subsamples.json
    output_path = os.path.join(
        os.path.dirname(file_path),
        f"{os.path.splitext(os.path.basename(file_path))[0]}_subsamples.json",
    )

    subsamples_data = sampler.create_subsamples(
        n_subsamples=n_subsamples, output_path=output_path
    )

    # Quick human-readable summary of what was produced.
    print(f"Created {subsamples_data['metadata']['n_subsamples']} subsamples")
    print("\nGlobal statistics:")
    print(json.dumps(subsamples_data["metadata"]["global_statistics"], indent=2))
    print("\nFirst subsample statistics:")
    print(json.dumps(subsamples_data["subsamples"][0]["statistics"], indent=2))

    # Materialize the subsample files (optionally compressed / parallel).
    sampler.write_subsamples(
        subsamples_file=output_path,
        output_dir=output_dir,
        compress=compress,
        n_processes=n_processes,
        overwrite=overwrite,
    )
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Example invocation against the CRAG task 1 & 2 dev dataset.
    dataset_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
    wanted_fields = ["domain", "answer", "question_type", "static_or_dynamic"]
    run_crag_task_1_and_2(dataset_path, wanted_fields)
|
|