        return len(doc.encode('utf-8'))

    @classmethod
    def count_words(cls, doc) -> int:
        return len(re.split(r'\s+', doc))
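
The surrounding class is truncated here, but these two classmethods are the byte and word counters the harness uses to normalize rolling log-likelihoods. A minimal sketch (not harness code; the document string and log-likelihood value are assumptions) of how such counts turn a summed log-likelihood in nats into word-level perplexity and bits-per-byte:

# Sketch only: 'doc' and 'loglikelihood' are illustrative inputs.
import math

doc = 'The quick brown fox jumps over the lazy dog.'
loglikelihood = -42.0                    # assumed model output, in nats
num_bytes = len(doc.encode('utf-8'))     # what count_bytes computes
num_words = len(doc.split())             # approximates count_words

word_perplexity = math.exp(-loglikelihood / num_words)
bits_per_byte = -loglikelihood / (num_bytes * math.log(2))
print(f'{word_perplexity=:.2f} {bits_per_byte=:.3f}')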
# File: lm-evaluation-harness-main/lm_eval/caching/cache.py
import hashlib
import os

import dill

from lm_eval.utils import eval_logger

MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
OVERRIDE_PATH = os.getenv('LM_HARNESS_CACHE_PATH')
PATH = OVERRIDE_PATH if OVERRIDE_PATH else f'{MODULE_DIR}/.cache'

# Suffix derived from a project-specific hash, so unrelated pickles in the
# same directory are never loaded or deleted by these helpers.
HASH_INPUT = 'EleutherAI-lm-evaluation-harness'
HASH_PREFIX = hashlib.sha256(HASH_INPUT.encode('utf-8')).hexdigest()
FILE_SUFFIX = f'.{HASH_PREFIX}.pickle'


def load_from_cache(file_name):
    # Returns the cached object, or None (implicitly) on a cache miss.
    try:
        path = f'{PATH}/{file_name}{FILE_SUFFIX}'
        with open(path, 'rb') as file:
            cached_task_dict = dill.loads(file.read())
            return cached_task_dict
    except Exception:
        eval_logger.debug(f'{file_name} is not cached, generating...')


def save_to_cache(file_name, obj):
    if not os.path.exists(PATH):
        os.mkdir(PATH)

    file_path = f'{PATH}/{file_name}{FILE_SUFFIX}'

    eval_logger.debug(f'Saving {file_path} to cache...')
    with open(file_path, 'wb') as file:
        file.write(dill.dumps(obj))


def delete_cache(key: str = ''):
    # Deletes every cache file whose name starts with `key`; the default
    # empty key clears the whole cache directory.
    files = os.listdir(PATH)
    for file in files:
        if file.startswith(key) and file.endswith(FILE_SUFFIX):
            file_path = f'{PATH}/{file}'
            os.unlink(file_path)
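
These three helpers cache any picklable object under a caller-chosen file name plus the project hash suffix. A round-trip sketch using only the functions above (the name 'demo_tasks' and the dict contents are illustrative; set LM_HARNESS_CACHE_PATH first if you don't want the module-local .cache directory):

# Hypothetical usage of the cache helpers above.
from lm_eval.caching.cache import delete_cache, load_from_cache, save_to_cache

tasks = load_from_cache('demo_tasks')        # None on a cold cache
if tasks is None:
    tasks = {'lambada': {'num_fewshot': 0}}  # stand-in for an expensive build
    save_to_cache('demo_tasks', tasks)

delete_cache('demo_tasks')                   # removes only files starting with this key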
# File: lm-evaluation-harness-main/lm_eval/decontamination/archiver.py
import datetime
import io
import json
import mmap
import os
from pathlib import Path
from typing import Any

import jsonlines
import tqdm
import zstandard


def json_serial(obj: Any) -> str:
    """JSON serializer for objects not serializable by default json code."""
    if isinstance(obj, (datetime.datetime,)):
        return obj.isoformat()
    raise TypeError('Type %s not serializable' % type(obj))


class Archive:
    # Writes documents as zstd-compressed JSONL: one
    # {'text': ..., 'meta': ...} object per line.
    def __init__(self, file_path: str, compression_level: int = 3) -> None:
        self.file_path = file_path
        dir_name = os.path.dirname(file_path)
        if dir_name:
            os.makedirs(dir_name, exist_ok=True)
        self.fh = open(self.file_path, 'wb')
        self.cctx = zstandard.ZstdCompressor(level=compression_level)
        self.compressor = self.cctx.stream_writer(self.fh)

    def add_data(self, data, meta=None) -> None:
        if meta is None:
            meta = {}
        self.compressor.write(
            json.dumps({'text': data, 'meta': meta}, default=json_serial).encode('UTF-8') + b'\n'
        )

    def commit(self) -> None:
        self.compressor.flush(zstandard.FLUSH_FRAME)
        self.fh.flush()
        self.fh.close()


class Reader:
    # Streams documents back out of an archive written by `Archive`.
    def __init__(self) -> None:
        pass

    def read(self, file, get_meta: bool = False, autojoin_paragraphs: bool = True, para_joiner: str = '\n\n'):
        with open(file, 'rb') as fh:
            self.fh = fh
            cctx = zstandard.ZstdDecompressor()
            reader = io.BufferedReader(cctx.stream_reader(fh))
            rdr = jsonlines.Reader(reader)
            for ob in rdr:
                # Legacy format: each line is a bare string with no metadata.
                if isinstance(ob, str):
                    assert not get_meta
                    yield ob
                    continue
                text = ob['text']
                if autojoin_paragraphs and isinstance(text, list):
                    text = para_joiner.join(text)
                if get_meta:
                    yield text, (ob['meta'] if 'meta' in ob else {})
                else:
                    yield text
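
Archive and Reader are symmetric: whatever add_data writes, read yields back in order. A round-trip sketch using the two classes above (the path 'data/sample.jsonl.zst' and the documents are illustrative):

# Hypothetical usage of Archive and Reader.
archive = Archive('data/sample.jsonl.zst')
archive.add_data('first document', meta={'source': 'demo'})
archive.add_data(['para one', 'para two'])  # list paragraphs are joined on read
archive.commit()                            # must flush the zstd frame before reading

reader = Reader()
for text, meta in reader.read('data/sample.jsonl.zst', get_meta=True):
    print(meta, text[:20])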