import json
from pathlib import Path

import datasets
from datasets import Value, Sequence

_CITATION = '''
@article{kirchner2022researching,
    title={Researching Alignment Research: Unsupervised Analysis},
    author={Kirchner, Jan H and Smith, Logan and Thibodeau, Jacques and McDonell, Kathleen and Reynolds, Laria},
    journal={arXiv preprint arXiv:2206.02841},
    year={2022}
}
'''

_DESCRIPTION = """A dataset of AI alignment research, collected from various sources."""

_HOMEPAGE = "https://github.com/StampyAI/alignment-research-dataset"

_LICENSE = ""

_VERSION_ = '0.0.0'


def iterate_file(filename):
    """Yield the JSON object parsed from each line of the given file, skipping unparseable lines."""
    with open(filename) as f:
        for line in f:
            try:
                yield json.loads(line)
            except json.JSONDecodeError:
                print(f'Could not parse: {line}')


## Feature extractor helpers
def get_type(value):
    """Recursively get the huggingface type for the provided value."""
    if value is None:
        return None
    # bool must be checked before int, as `isinstance(True, int)` is also True
    if isinstance(value, bool):
        return Value('bool')
    if value and isinstance(value, (tuple, list)):
        return Sequence(get_type(value[0]))
    if value and isinstance(value, dict):
        return {k: get_type(v) for k, v in value.items()}
    if isinstance(value, str):
        return Value('string')
    if isinstance(value, int):
        return Value('int32')
    if isinstance(value, float):
        return Value('double')
    return None


def print_extra_features(files):
    """Go through all the provided files and print the non-default features of each one.

    This could be done manually, but would be a hassle. Each file is assumed to
    contain a JSON object per line, and `files` should be `Path` objects.
    """
    ignored_keys = [
        'comments',  # Comments are arbitrarily nested objects, which doesn't play nice with huggingface
    ]
    per_file = {}
    for filename in sorted(files):
        extra_types = {}
        for item in iterate_file(filename):
            for k, v in item.items():
                if (k not in extra_types or not extra_types[k]) and k not in ignored_keys and k not in DEFAULT_FEATURES:
                    extra_types[k] = get_type(v)
        per_file[filename] = extra_types

    print('DATASOURCES = {')
    for filename, extra_features in per_file.items():
        vals = ',\n'.join(f"        '{k}': {v}" for k, v in extra_features.items())
        print(f"    '{filename.stem}': {{\n{vals}\n    }},")
    print('}')
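
# Usage sketch for regenerating the DATASOURCES table below (illustrative;
# assumes the .jsonl data files live in a `data/` directory next to this
# script - adjust the path for your setup):
#
#   print_extra_features(Path('data').glob('*.jsonl'))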

# These keys are present in all files
DEFAULT_FEATURES = {
    'id': Value('string'),
    'source': Value('string'),
    'title': Value('string'),
    'text': Value('large_string'),
    'url': Value('string'),
    'date_published': Value('string'),
}

# Per-datasource additional features, as printed by `print_extra_features` above
DATASOURCES = {
    'agentmodels': {
        'source_filetype': Value('string'),
        'converted_with': Value('string'),
        'book_title': Value('string'),
        'authors': Sequence(Value('string')),
    },
    'aiimpacts.org': {
        'paged_url': Value('string'),
    },
    'aipulse.org': {
        'paged_url': Value('string'),
    },
    'aisafety.camp': {
        'paged_url': Value('string'),
    },
    'alignment_newsletter': {
        'converted_with': Value('string'),
        'source_type': Value('string'),
        'venue': Value('string'),
        'newsletter_category': Value('string'),
        'highlight': Value('int32'),
        'newsletter_number': Value('string'),
        'summarizer': Value('string'),
        'opinion': Value('string'),
        'prerequisites': Value('string'),
        'read_more': Value('string'),
        'authors': Value('string'),
    },
    'arbital': {
        'source_filetype': Value('string'),
        'authors': Value('string'),
        'alias': Value('string'),
    },
    'arxiv_papers': {
        'authors': Value('string'),
        'source_type': Value('string'),
        'converted_with': Value('string'),
        'data_last_modified': Value('string'),
        'abstract': Value('string'),
        'author_comment': Value('string'),
        'journal_ref': Value('string'),
        'doi': Value('string'),
        'primary_category': Value('string'),
        'categories': Sequence(Value('string')),
    },
    'audio_transcripts': {
        'source_filetype': Value('string'),
        'converted_with': Value('string'),
        'authors': Value('string'),
    },
    'carado.moe': {
        'source_type': Value('string'),
        'authors': Value('string'),
    },
    'cold.takes': {},
    'deepmind.blog': {
        'source_type': Value('string'),
    },
    'distill': {
        'source_type': Value('string'),
        'converted_with': Value('string'),
        'authors': Sequence(Value('string')),
        'abstract': Value('string'),
        'journal_ref': Value('string'),
        'doi': Value('string'),
        'bibliography_bib': Sequence({'title': Value('string')}),
    },
    'eaforum': {
        'authors': Value('string'),
        'score': Value('string'),
        'omega_karma': Value('string'),
        'votes': Value('string'),
        'tags': Value('string'),
    },
    'gdocs': {
        'source_filetype': Value('string'),
        'converted_with': Value('string'),
        'authors': Value('string'),
        'docx_name': Value('string'),
    },
    'gdrive_ebooks': {
        'source_filetype': Value('string'),
        'converted_with': Value('string'),
        'chapter_names': Sequence(Value('string')),
        'file_name': Value('string'),
    },
    'generative.ink': {},
    'gwern_blog': {
        'authors': Value('string'),
    },
    'intelligence.org': {
        'paged_url': Value('string'),
    },
    'jsteinhardt.wordpress.com': {
        'paged_url': Value('string'),
    },
    'lesswrong': {
        'authors': Value('string'),
        'score': Value('string'),
        'omega_karma': Value('string'),
        'votes': Value('string'),
        'tags': Value('string'),
    },
    'markdown.ebooks': {
        'source_type': Value('string'),
        'authors': Value('string'),
        'filename': Value('string'),
    },
    'nonarxiv_papers': {
        'source_filetype': Value('string'),
        'abstract': Value('string'),
        'authors': Sequence(Value('string')),
        'filename': Value('string'),
    },
    'qualiacomputing.com': {
        'paged_url': Value('string'),
    },
    'reports': {
        'source_filetype': Value('string'),
        'abstract': Value('string'),
        'authors': Sequence(Value('string')),
        'filename': Value('string'),
    },
    'stampy': {
        'source_filetype': Value('string'),
        'authors': Value('string'),
        'question': Value('string'),
        'answer': Sequence(Value('string')),
        'entry': Value('string'),
    },
    'vkrakovna.wordpress.com': {
        'paged_url': Value('string'),
    },
    'waitbutwhy': {
        'source_type': Value('string'),
        'authors': Value('string'),
    },
    'www.yudkowsky.net': {
        'paged_url': Value('string'),
    },
}


def join_features(features, to_join):
    """Recursively join the provided dicts into a single `datasets.Features`.

    `to_join` can either be a dict to be merged, or a list of dicts to merge.
    """
    if not to_join:
        return datasets.Features(features)
    if isinstance(to_join, dict):
        return datasets.Features(dict(features, **to_join))
    return join_features(dict(features, **to_join[0]), to_join[1:])
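
# For instance (a minimal sketch of the merge behaviour):
#
#   join_features({'a': Value('string')}, [{'b': Value('int32')}, {'c': Value('bool')}])
#
# folds the list into the base dict and returns a datasets.Features with the
# keys 'a', 'b' and 'c'.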

class AlignmentResearchDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for AlignmentResearchDataset."""

    def __init__(self, sources, features, **kwargs):
        """BuilderConfig for AlignmentResearchDataset.

        :param List[string] sources: the sources which will be used by this config
        """
        super().__init__(version=datasets.Version(_VERSION_), **kwargs)
        self.sources = sources
        self.features = join_features(DEFAULT_FEATURES, features)

    @property
    def files(self):
        return [f'{source}.jsonl' for source in self.sources]


class AlignmentResearchDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version(_VERSION_)

    BUILDER_CONFIGS = [
        AlignmentResearchDatasetConfig(
            name='all',
            description='All data files',
            sources=list(DATASOURCES.keys()),
            features=list(DATASOURCES.values()),
        )
    ] + [
        AlignmentResearchDatasetConfig(name=source, sources=[source], features=features)
        for source, features in DATASOURCES.items()
    ]
    DEFAULT_CONFIG_NAME = 'all'

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'files': dl_manager.download(self.config.files)},
            )
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, files):
        seen = set()

        def is_good(item):
            """Deduplicate items by id and drop those with missing or placeholder text."""
            item_id = item and item.get('id')
            if not item_id or item_id in seen:
                return False
            seen.add(item_id)
            return item.get('text') not in [None, '', 'n/a']

        def prepare_example(item):
            return item['id'], {k: item.get(k) for k in self.config.features}

        lines = (item for filename in files for item in iterate_file(filename))
        return map(prepare_example, filter(is_good, lines))
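
# Usage sketch (illustrative; the script path is a placeholder - point
# `load_dataset` at this file, or at the hub repo that hosts it):
#
#   from datasets import load_dataset
#
#   ds = load_dataset('path/to/this_script.py', 'arxiv_papers', split='train')
#   print(ds[0]['title'])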