yushengsu's picture
Add code (#1)
72e8a91 verified
raw
history blame
1.55 kB
import os

from datasets import DatasetDict, concatenate_datasets, load_dataset
from huggingface_hub import HfFolder, create_repo, dataset_info, model_info
# Optionally specify your username or organization under which to create the repo
namespace = "yushengsu"  # Replace with your Hugging Face username or organization
# Your desired repository name
repo_name = "fineweb_edu_cleaned_modified"
full_repo_name = f"{namespace}/{repo_name}"
# Log in programmatically (if not logged in through CLI)
# token = "your_hf_token"
# HfFolder.save_token(token)
# Ensure the target dataset repository exists on the Hub: probe it first,
# and create it only when the probe fails (EAFP).
try:
    repo_details = dataset_info(full_repo_name)
    print(f"Repository already exists at: {repo_details.id}")
except Exception:
    # dataset_info raises (RepositoryNotFoundError among others) when the repo
    # is missing; the broad catch is deliberate best-effort — fall through and
    # create it. NOTE(review): auth/network errors also land here — confirm.
    repo_url = create_repo(full_repo_name, repo_type="dataset", private=False)
    print(f"Repository created at: {repo_url}")
# Download each of the 20 pre-chunked fineweb-edu shards and merge them
# split-by-split into one DatasetDict, then upload the merged result.
concatenated_datasets_dict = {}
for idx in range(20):
    # BUG FIX: the chunk index must track the loop variable — the original
    # hard-coded "chunk_0" and so downloaded the same shard 20 times.
    dataset = load_dataset(
        f"ruliad/fineweb_edu_100BT_chunk_{idx}",
        cache_dir="/lustre/scratch/shared-folders/llm_project/yusheng/preprocessing_pre-trainig_data/.cache",
    )
    for split in dataset.keys():
        if split in concatenated_datasets_dict:
            concatenated_datasets_dict[split] = concatenate_datasets(
                [concatenated_datasets_dict[split], dataset[split]]
            )
        else:
            # BUG FIX: also keep splits that first appear after the first
            # iteration — the original silently dropped them.
            concatenated_datasets_dict[split] = dataset[split]
# BUG FIX: push the merged DatasetDict; the original pushed only the last
# shard that was loaded, discarding all concatenation work.
DatasetDict(concatenated_datasets_dict).push_to_hub(full_repo_name, private=False)