File size: 2,053 Bytes
df57a76 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 |
# Example usage
from crag_sampler import CragSampler
import json
import os
def run_crag_task_1_and_2(
    file_path: str,
    fields_to_extract: list[str] | None = None,
    n_subsamples: int = 5,
    output_dir: str | None = None,
    compress: bool = True,
    n_processes: int | None = None,
    overwrite: bool = False,
) -> None:
    """Run the CRAG sampling pipeline for tasks 1 and 2.

    Creates subsamples of the input JSONL dataset, prints summary
    statistics, and writes the subsamples to individual files.

    Args:
        file_path: Path to input JSONL file (may be compressed, e.g. .bz2).
        fields_to_extract: List of fields to extract from JSONL; None lets
            the sampler use its defaults.
        n_subsamples: Number of subsamples to create.
        output_dir: Directory for output files; None lets the sampler
            choose its default location.
        compress: Whether to compress output files.
        n_processes: Number of processes for parallel processing; None
            lets the sampler decide.
        overwrite: Whether to overwrite existing files.
    """
    # Initialize sampler (caching enabled so repeat runs skip re-parsing)
    sampler = CragSampler(
        input_file=file_path, required_fields=fields_to_extract, use_cache=True
    )

    # Build the subsamples-index path next to the input file.
    # NOTE(review): splitext strips only the last extension, so
    # "x.jsonl.bz2" yields "x.jsonl_subsamples.json" — confirm intended.
    output_path = os.path.join(
        os.path.dirname(file_path),
        f"{os.path.splitext(os.path.basename(file_path))[0]}_subsamples.json",
    )

    # Create subsamples (also persisted to output_path by the sampler)
    subsamples_data = sampler.create_subsamples(
        n_subsamples=n_subsamples, output_path=output_path
    )

    # Print statistics
    print(f"Created {subsamples_data['metadata']['n_subsamples']} subsamples")
    print("\nGlobal statistics:")
    print(json.dumps(subsamples_data["metadata"]["global_statistics"], indent=2))
    print("\nFirst subsample statistics:")
    print(json.dumps(subsamples_data["subsamples"][0]["statistics"], indent=2))

    # Write subsamples to files
    sampler.write_subsamples(
        subsamples_file=output_path,
        output_dir=output_dir,
        compress=compress,
        n_processes=n_processes,
        overwrite=overwrite,
    )
# Demonstration entry point: sample the CRAG dev set with default settings.
if __name__ == "__main__":
    input_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
    wanted_fields = ["domain", "answer", "question_type", "static_or_dynamic"]
    run_crag_task_1_and_2(input_path, wanted_fields)
|