Datasets:

Modalities:
Text
Formats:
json
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
CRAG / crag_sampler / sampler.py
jchevallard's picture
feat: converting CRAG sampler to a package for simpler install and use
df57a76
raw
history blame
3.67 kB
import bz2
from typing import Iterator, Dict, Any, List, Optional
import pandas as pd
import os
import hashlib
import json
from sklearn.model_selection import StratifiedKFold
import numpy as np
from multiprocessing import Pool, cpu_count
from functools import partial
import subprocess
from .utils import (
read_jsonl_fields_fast,
process_answer_types,
create_stratified_subsamples,
subsample_jsonl_file,
)
class CragSampler:
    """Main class for handling CRAG dataset sampling operations."""

    def __init__(
        self,
        input_file: str,
        required_fields: Optional[List[str]] = None,
        use_cache: bool = True,
    ):
        """Initialize CragSampler.

        Args:
            input_file: Path to input JSONL file (can be bz2 compressed)
            required_fields: List of field names to extract. If None, uses default fields
            use_cache: Whether to use/create cache file
        """
        self.input_file = input_file
        # Fall back to the standard CRAG stratification fields when the caller
        # supplies nothing (None or an empty list).
        fallback_fields = [
            "domain",
            "answer",
            "question_type",
            "static_or_dynamic",
        ]
        self.required_fields = required_fields or fallback_fields
        self.use_cache = use_cache
        # Eagerly load the dataset so every sampling method can reuse it.
        self.df = self._load_data()

    def _load_data(self) -> pd.DataFrame:
        """Load the selected fields from the JSONL file and normalize answer types."""
        raw_frame = read_jsonl_fields_fast(
            self.input_file, self.required_fields, self.use_cache
        )
        return process_answer_types(raw_frame)

    def create_subsamples(
        self,
        n_subsamples: int = 5,
        stratify_columns: Optional[List[str]] = None,
        output_path: Optional[str] = None,
        force_compute: bool = False,
    ) -> Dict:
        """Create stratified subsamples of the dataset.

        Args:
            n_subsamples: Number of subsamples to create
            stratify_columns: Columns to use for stratification. If None, uses defaults
            output_path: Path to save/load the JSON output
            force_compute: If True, always compute subsamples even if file exists

        Returns:
            Dictionary containing the subsamples information
        """
        # An explicitly-passed empty list is respected; only None triggers defaults.
        if stratify_columns is None:
            stratify_columns = [
                "domain",
                "answer_type",
                "question_type",
                "static_or_dynamic",
            ]
        if output_path is None:
            # Default: "<input stem>_subsamples.json" next to the input file.
            input_dir = os.path.dirname(self.input_file)
            input_stem = os.path.splitext(os.path.basename(self.input_file))[0]
            output_path = os.path.join(input_dir, f"{input_stem}_subsamples.json")
        return create_stratified_subsamples(
            self.df,
            n_subsamples=n_subsamples,
            stratify_columns=stratify_columns,
            output_path=output_path,
            force_compute=force_compute,
        )

    def write_subsamples(
        self,
        subsamples_file: str,
        output_dir: Optional[str] = None,
        compress: bool = True,
        n_processes: Optional[int] = None,
        overwrite: bool = False,
    ) -> None:
        """Write subsamples to separate files.

        Args:
            subsamples_file: Path to JSON file containing subsample indices
            output_dir: Directory to save subsample files
            compress: Whether to compress output files with bz2
            n_processes: Number of processes to use
            overwrite: If False, skip existing output files
        """
        # Pure delegation: the heavy lifting (splitting, compression,
        # multiprocessing) lives in the utils helper.
        subsample_jsonl_file(
            self.input_file,
            subsamples_file,
            output_dir=output_dir,
            compress=compress,
            n_processes=n_processes,
            overwrite=overwrite,
        )