from huggingface_hub import create_repo, dataset_info
from datasets import load_dataset, concatenate_datasets, DatasetDict

# Target dataset repository on the Hugging Face Hub.
namespace = "yushengsu"
repo_name = "fineweb_edu_cleaned_modified"
full_repo_name = f"{namespace}/{repo_name}"

# Reuse the dataset repo if it already exists; otherwise create it.
try:
    repo_details = dataset_info(full_repo_name)
    print(f"Repository already exists at: {repo_details.id}")
except Exception:
    repo_url = create_repo(full_repo_name, repo_type="dataset", private=False)
    print(f"Repository created at: {repo_url}")
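
# Alternative (a sketch, assuming a recent huggingface_hub release): create_repo
# accepts exist_ok=True, which makes the try/except above unnecessary.
#
#     create_repo(full_repo_name, repo_type="dataset", private=False, exist_ok=True)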

# Load the 20 chunk repositories (chunk_0 ... chunk_19) and concatenate
# their splits into a single dataset per split.
concatenated_datasets_dict = {}
for idx in range(20):
    dataset = load_dataset(
        f"ruliad/fineweb_edu_100BT_chunk_{idx}",
        cache_dir="/lustre/scratch/shared-folders/llm_project/yusheng/preprocessing_pre-trainig_data/.cache",
    )
    for split in dataset.keys():
        if split in concatenated_datasets_dict:
            concatenated_datasets_dict[split] = concatenate_datasets(
                [concatenated_datasets_dict[split], dataset[split]]
            )
        else:
            concatenated_datasets_dict[split] = dataset[split]

# Wrap the concatenated splits in a DatasetDict and push the merged result
# to the Hub.
merged_dataset = DatasetDict(concatenated_datasets_dict)
merged_dataset.push_to_hub(full_repo_name, private=False)
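
# For a merged dataset this large, the upload can also be sharded explicitly;
# a sketch using push_to_hub's max_shard_size parameter:
#
#     merged_dataset.push_to_hub(full_repo_name, private=False, max_shard_size="500MB")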
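
# Optional sanity check (a sketch, not part of the original script): reload the
# merged dataset from the Hub and confirm each split has the expected row count.
merged = load_dataset(full_repo_name)
for split_name, split_ds in merged.items():
    assert split_ds.num_rows == concatenated_datasets_dict[split_name].num_rows
print({name: ds.num_rows for name, ds in merged.items()})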