---
dataset_info:
  features:
  - name: text
    dtype: string
  splits:
  - name: ko
    num_bytes: 60617676
    num_examples: 15000
  - name: zh
    num_bytes: 61325384
    num_examples: 15000
  - name: en
    num_bytes: 72376433
    num_examples: 15000
  - name: ja
    num_bytes: 66958316
    num_examples: 15000
  download_size: 159349421
  dataset_size: 261277809
configs:
- config_name: default
  data_files:
  - split: ko
    path: data/ko-*
  - split: zh
    path: data/zh-*
  - split: en
    path: data/en-*
  - split: ja
    path: data/ja-*
---

```python
from datasets import load_dataset, Dataset, DatasetDict
from itertools import islice
import re


def extract_dump_date(dump_str):
    """Extract the date portion from a dump string like 'CC-MAIN-2024-10'."""
    if not dump_str:
        return '0'  # Items without a dump field sort last
    match = re.search(r'(\d{4}-\d{2})', dump_str)
    return match.group(1) if match else '0'


def create_dataset_dict(datasets):
    """Combine the per-language datasets into a single DatasetDict."""
    return DatasetDict(datasets)


def upload_to_huggingface(dataset_dict, repo_name, token):
    """Push the DatasetDict to the Hugging Face Hub."""
    dataset_dict.push_to_hub(repo_name, token=token)


def load_and_verify_datasets():
    # Load the source datasets in streaming mode
    datasets = {
        "ko": load_dataset("HuggingFaceFW/fineweb-2", "kor_Hang", split="test", streaming=True),
        "zh": load_dataset("HuggingFaceFW/fineweb-2", "cmn_Hani", split="test", streaming=True),
        "en": load_dataset("HuggingFaceFW/fineweb-edu", "CC-MAIN-2024-10", split="train", streaming=True),
        "ja": load_dataset("HuggingFaceFW/fineweb-2", "jpn_Jpan", split="test", streaming=True),
    }

    processed_datasets = {}

    for lang, ds in datasets.items():
        print(f"\nProcessing {lang} dataset...")

        # Collect items together with their dump identifiers
        items_with_dumps = []
        for item in islice(ds, 100000):  # Collect the first 100K items
            dump = item.get('dump', '')
            items_with_dumps.append((item, dump))

        # Sort by dump date in descending order (most recent first)
        items_with_dumps.sort(key=lambda x: extract_dump_date(x[1]), reverse=True)

        # Print the dump distribution of the sorted data
        print("\nDump distribution (most recent first):")
        dump_counts = {}
        for _, dump in items_with_dumps[:1000]:  # Check the first 1000 items
            dump_counts[dump] = dump_counts.get(dump, 0) + 1
        for dump, count in sorted(dump_counts.items(), key=lambda x: extract_dump_date(x[0]), reverse=True):
            print(f"  {dump}: {count} items")

        texts_set = set()

        # Process the sorted items
        for item, dump in items_with_dumps:
            text = item.get('text', item.get('content', '')).strip()

            # Basic quality filter: keep only non-empty texts of reasonable length;
            # the set also deduplicates exact matches
            if text and len(text) > 50:
                texts_set.add(text)

            if len(texts_set) >= 15000:
                break

        # Convert the set to a list and build the dataset
        filtered_texts = list(texts_set)
        processed_datasets[lang] = Dataset.from_dict({"text": filtered_texts})

        print(f"\n{lang} dataset final size: {len(processed_datasets[lang])} examples")

        # Print sample texts with their dumps
        print("\nSample texts from most recent dump:")
        samples_shown = 0
        for item, dump in items_with_dumps:
            if samples_shown >= 2:
                break
            text = item.get('text', item.get('content', '')).strip()
            if text in texts_set:
                print(f"Dump: {dump}")
                print(f"Length: {len(text)}")
                print(f"Text preview: {text[:100]}...")
                print("---")
                samples_shown += 1

    return processed_datasets


def main():
    try:
        datasets = load_and_verify_datasets()

        print("\nDatasets processed with the following sizes:")
        for lang, ds in datasets.items():
            print(f"{lang}: {len(ds)} examples")

        # Create the DatasetDict
        print("\nCreating DatasetDict...")
        dataset_dict = create_dataset_dict(datasets)

        # Upload to the Hugging Face Hub
        # Replace with your own values
        REPO_NAME = "yourname/reponame"
        HF_TOKEN = "your_api_key"  # Don't share this token!

        print("\nUploading to Hugging Face Hub...")
        upload_to_huggingface(dataset_dict, REPO_NAME, HF_TOKEN)

        print(f"\nDataset uploaded successfully to {REPO_NAME}")

    except Exception as e:
        print(f"Error processing datasets: {str(e)}")


if __name__ == "__main__":
    main()
```