# coding=utf-8
"""Lexicap: Lex Friedman Podcast Whisper Captions."""
import csv

import datasets

# No published citation is available for this dataset yet.
_CITATION = """\
"""

_DESCRIPTION = """\
Lexicap contains the captions for every Lex Friedman Podcast episode. It is created by [Dr. Andrej Karpathy](https://twitter.com/karpathy).
There are 430 caption files available. There are 2 types of files:
- large
- small
Each file name follows the format `episode_{episode_number}_{file_type}.vtt`.
"""

# The two caption-file variants; one BuilderConfig is created per entry.
_DATA_OPTIONS = ["large", "small"]
class LexicapConfig(datasets.BuilderConfig):
    """BuilderConfig for Lexicap."""

    def __init__(self, **kwargs):
        """Constructs a LexicapConfig.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Pin the dataset version; all other config fields come from **kwargs.
        # (The original ended this call with a stray comma, turning it into a
        # discarded one-element tuple expression.)
        super(LexicapConfig, self).__init__(version=datasets.Version("0.1.0", ""), **kwargs)
class Lexicap(datasets.GeneratorBasedBuilder):
    """Lexicap dataset."""

    # One config per caption-file variant ("large" and "small", per the
    # module-level description). Listed explicitly so this class does not
    # fail with a NameError on an undefined module constant.
    BUILDER_CONFIGS = [
        LexicapConfig(  # pylint: disable=g-complex-comprehension
            name=config_name,
            description=(
                "A dataset consisting of captions for every Lex Friedman "
                "Podcast episode, generated using OpenAI Whisper. This "
                "dataset is created by "
                "[Dr. Andrej Karpathy](https://twitter.com/karpathy)."
            ),
        )
        for config_name in ("large", "small")
    ]

    def _info(self):
        """Returns the DatasetInfo describing this dataset."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns the split generators.

        There is no predefined train/val/test split for this dataset, so
        everything is exposed under the single TRAIN split.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": "vtt"},
            ),
        ]