import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
Parses a packaged Wikipedia bz2 dump, extracts the required content, and strips Wiki markup with wikitextparser. \
This data file was parsed from the 20230701 Traditional Chinese Wikipedia dump. \
Two fields are kept after parsing: the article title (title) and the article body (article). \
The original dump mixes Simplified and Traditional Chinese, so OpenCC was used to convert Simplified to Traditional. \
Original total number of articles: 4,296,654. \
Total article titles: 4,296,249. \
Total article bodies: 4,296,249. \
Articles whose markup could not be removed automatically: 5,415. \
Articles with content: 4,296,249.
"""

# The dataset repository on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/jslin09/wikipedia_tw"


class WikipediaConfig(datasets.BuilderConfig):
    """BuilderConfig for Wikipedia_tw."""

    def __init__(self, **kwargs):
        """BuilderConfig for Wikipedia_tw.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(WikipediaConfig, self).__init__(**kwargs)


class Wikipedia_tw(datasets.GeneratorBasedBuilder):
    """Wikipedia_tw: The Wikipedia dataset in Traditional Chinese plain text. Version 1.0."""

    BUILDER_CONFIGS = [
        WikipediaConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Wikipedia in Traditional Chinese plain text.",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "article": datasets.Value("string"),
                }
            ),
            # No default supervised_keys: title/article pairs do not define a
            # supervised task by themselves.
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/jslin09/wikipedia_tw",
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example) pairs with a title and an article per entry."""
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, "r", encoding="UTF-8") as f:
            # strict=False tolerates control characters inside JSON strings.
            wikipedia = json.load(f, strict=False)
            for page_index in range(len(wikipedia)):
                title = wikipedia[page_index]["title"]
                article = wikipedia[page_index]["article"]
                yield key, {
                    "title": title,
                    "article": article,
                }
                key += 1
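
# Usage sketch (not part of the loading script itself): assuming the script
# above is published as the Hub dataset repository "jslin09/wikipedia_tw",
# the data can be loaded through the standard `datasets` API. The
# "plain_text" config name and "train" split come from the builder above.
#
#   from datasets import load_dataset
#
#   wiki = load_dataset("jslin09/wikipedia_tw", "plain_text", split="train")
#   print(wiki[0]["title"])
#   print(wiki[0]["article"][:200])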