import os
from typing import Dict, List, Optional

import pandas as pd

from .utils import (
    read_jsonl_fields_fast,
    process_answer_types,
    create_stratified_subsamples,
    subsample_jsonl_file,
)


class CragSampler:
    """Main class for handling CRAG dataset sampling operations."""

    def __init__(
        self,
        input_file: str,
        required_fields: Optional[List[str]] = None,
        use_cache: bool = True,
    ):
        """Initialize CragSampler.

        Args:
            input_file: Path to input JSONL file (can be bz2 compressed).
            required_fields: List of field names to extract. If None, uses default fields.
            use_cache: Whether to use/create a cache file.
        """
        self.input_file = input_file
        self.required_fields = required_fields or [
            "domain",
            "answer",
            "question_type",
            "static_or_dynamic",
        ]
        self.use_cache = use_cache
        self.df = self._load_data()

    def _load_data(self) -> pd.DataFrame:
        """Load and process data from the JSONL file."""
        df = read_jsonl_fields_fast(
            self.input_file, self.required_fields, self.use_cache
        )
        # Derive the answer_type column used later as a stratification key.
        return process_answer_types(df)

    def create_subsamples(
        self,
        n_subsamples: int = 5,
        stratify_columns: Optional[List[str]] = None,
        output_path: Optional[str] = None,
        force_compute: bool = False,
    ) -> Dict:
        """Create stratified subsamples of the dataset.

        Args:
            n_subsamples: Number of subsamples to create.
            stratify_columns: Columns to use for stratification. If None, uses defaults.
            output_path: Path to save/load the JSON output.
            force_compute: If True, always compute subsamples even if the file exists.

        Returns:
            Dictionary containing the subsamples information.
        """
        if stratify_columns is None:
            stratify_columns = [
                "domain",
                "answer_type",
                "question_type",
                "static_or_dynamic",
            ]

        if output_path is None:
            # Default: save next to the input file as <input_stem>_subsamples.json.
            output_path = os.path.join(
                os.path.dirname(self.input_file),
                f"{os.path.splitext(os.path.basename(self.input_file))[0]}_subsamples.json",
            )

        return create_stratified_subsamples(
            self.df,
            n_subsamples=n_subsamples,
            stratify_columns=stratify_columns,
            output_path=output_path,
            force_compute=force_compute,
        )

    def write_subsamples(
        self,
        subsamples_file: str,
        output_dir: Optional[str] = None,
        compress: bool = True,
        n_processes: Optional[int] = None,
        overwrite: bool = False,
    ) -> None:
        """Write subsamples to separate files.

        Args:
            subsamples_file: Path to JSON file containing subsample indices.
            output_dir: Directory to save subsample files.
            compress: Whether to compress output files with bz2.
            n_processes: Number of processes to use.
            overwrite: If False, skip existing output files.
        """
        subsample_jsonl_file(
            self.input_file,
            subsamples_file,
            output_dir=output_dir,
            compress=compress,
            n_processes=n_processes,
            overwrite=overwrite,
        )
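

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the file paths below are placeholders,
# not files shipped with this module. Because of the relative `.utils`
# import, run this as part of its package, e.g. `python -m <package>.<module>`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Hypothetical path to a (possibly bz2-compressed) CRAG JSONL dump.
    sampler = CragSampler("crag_task_1_v2.jsonl.bz2", use_cache=True)

    # Compute 5 stratified subsamples and persist their indices to JSON.
    info = sampler.create_subsamples(
        n_subsamples=5,
        output_path="crag_subsamples.json",
    )
    print("Subsample info keys:", sorted(info))

    # Materialize each subsample as its own bz2-compressed JSONL file.
    sampler.write_subsamples(
        subsamples_file="crag_subsamples.json",
        output_dir="subsamples/",
        compress=True,
    )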