Files changed (1)
  1. download_create_upload.py +38 -0
download_create_upload.py ADDED
@@ -0,0 +1,38 @@
+ from huggingface_hub import create_repo, dataset_info
+ from datasets import load_dataset, concatenate_datasets, DatasetDict
+
+ # Optionally specify the username or organization under which to create the repo
+ namespace = "yushengsu"  # Replace with your Hugging Face username or organization
+
+ # Your desired repository name
+ repo_name = "fineweb_edu_cleaned_modified"
+
+ full_repo_name = f"{namespace}/{repo_name}"
+
+ # Log in programmatically (if not already logged in through the CLI):
+ # from huggingface_hub import login
+ # login(token="your_hf_token")
+
+ # Create the dataset repository if it does not already exist
+ try:
+     repo_details = dataset_info(full_repo_name)
+     print(f"Repository already exists at: {repo_details.id}")
+ except Exception:
+     # dataset_info raises when the repository is missing, so create it
+     repo_url = create_repo(full_repo_name, repo_type="dataset", private=False)
+     print(f"Repository created at: {repo_url}")
+
+ # Download all 20 chunks and concatenate them split by split
+ concatenated_datasets_dict = {}
+ for idx in range(20):
+     # Use the loop index so every chunk is loaded, not just chunk_0
+     dataset = load_dataset(f"ruliad/fineweb_edu_100BT_chunk_{idx}", cache_dir="/lustre/scratch/shared-folders/llm_project/yusheng/preprocessing_pre-trainig_data/.cache")
+     for split in dataset.keys():
+         if split in concatenated_datasets_dict:
+             concatenated_datasets_dict[split] = concatenate_datasets([concatenated_datasets_dict[split], dataset[split]])
+         else:
+             concatenated_datasets_dict[split] = dataset[split]
+
+ # Push the concatenated splits (not just the last loaded chunk) to the Hub
+ DatasetDict(concatenated_datasets_dict).push_to_hub(full_repo_name, private=False)
+
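A quick way to sanity-check the upload (a sketch, not part of this commit; it assumes the push above completed and reuses the same repo id built from namespace and repo_name):

    # Load the uploaded dataset back from the Hub and confirm every split
    # carries the concatenated row counts, not just a single chunk's rows.
    from datasets import load_dataset

    check = load_dataset("yushengsu/fineweb_edu_cleaned_modified")
    for split, ds in check.items():
        print(split, ds.num_rows)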