from huggingface_hub import create_repo, dataset_info
from huggingface_hub.utils import RepositoryNotFoundError
from datasets import load_dataset, concatenate_datasets, DatasetDict

# Optionally specify your username or organization under which to create the repo
namespace = "yushengsu"  # Replace with your Hugging Face username or organization

# Your desired repository name
repo_name = "fineweb_edu_cleaned_modified"

full_repo_name = f"{namespace}/{repo_name}"

# Log in programmatically (if not already logged in through the CLI):
# from huggingface_hub import login
# login(token="your_hf_token")

# Create the dataset repository if it does not already exist
try:
    repo_details = dataset_info(full_repo_name)
    print(f"Repository already exists at: {repo_details.id}")
except RepositoryNotFoundError:
    repo_url = create_repo(full_repo_name, repo_type="dataset", private=False)
    print(f"Repository created at: {repo_url}")



# Load each of the 20 chunks and concatenate them split by split.
# NOTE: the original loaded chunk_0 on every iteration; the chunk index is
# interpolated here, assuming chunks 0-19 exist under the ruliad namespace.
concatenated_datasets_dict = {}
for idx in range(20):
    dataset = load_dataset(
        f"ruliad/fineweb_edu_100BT_chunk_{idx}",
        cache_dir="/lustre/scratch/shared-folders/llm_project/yusheng/preprocessing_pre-trainig_data/.cache",
    )
    for split in dataset.keys():
        if split in concatenated_datasets_dict:
            concatenated_datasets_dict[split] = concatenate_datasets(
                [concatenated_datasets_dict[split], dataset[split]]
            )
        else:
            concatenated_datasets_dict[split] = dataset[split]
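
# Report what will be uploaded (a small addition, not in the original script;
# num_rows is a standard datasets.Dataset attribute):
for split, ds in concatenated_datasets_dict.items():
    print(f"{split}: {ds.num_rows:,} rows after concatenating all chunks")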

# Push the concatenated splits (not just the last loaded chunk) to the Hub
DatasetDict(concatenated_datasets_dict).push_to_hub(full_repo_name, private=False)
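
# Optional sanity check, a minimal sketch assuming the push succeeded and the
# active token can read the repo: reload from the Hub and compare row counts.
reloaded = load_dataset(full_repo_name)
for split, ds in reloaded.items():
    assert ds.num_rows == concatenated_datasets_dict[split].num_rows, split
    print(f"verified {split}: {ds.num_rows:,} rows")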