import os
import json
import argparse
from datetime import datetime
from datasets import Dataset
from huggingface_hub import HfApi, upload_file
import shutil
import math
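
# Prepare and publish a CulturaX-derived dataset (e.g. ScandLM/danish_culturax) on the
# Hugging Face Hub: clean a local <language>_culturax.jsonl file, shard it if needed,
# convert it to Parquet, and upload the data files, a README, and the helper scripts.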

def clean_jsonl_data(file_path):
    """Clean and validate JSONL file data."""
    cleaned_data = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line_number, line in enumerate(f, start=1):
            try:
                data = json.loads(line)

                # Validate 'timestamp' field
                if "timestamp" in data:
                    if not data["timestamp"] or not isinstance(data["timestamp"], str):
                        data["timestamp"] = None
                    else:
                        try:
                            datetime_obj = datetime.fromisoformat(
                                data["timestamp"].replace("Z", "+00:00")
                            )
                            data["timestamp"] = datetime_obj.isoformat()
                        except ValueError:
                            data["timestamp"] = None

                # Ensure 'text' is a string
                if "text" in data and not isinstance(data["text"], str):
                    data["text"] = str(data["text"]) if data["text"] is not None else None

                # Validate 'url' and 'source'
                if "url" in data and not isinstance(data["url"], str):
                    data["url"] = str(data["url"]) if data["url"] is not None else None

                if "source" in data and not isinstance(data["source"], str):
                    data["source"] = str(data["source"]) if data["source"] is not None else None

                cleaned_data.append(data)

            except json.JSONDecodeError as e:
                print(f"JSON decode error at line {line_number}: {e}")
            except Exception as e:
                print(f"Error processing line {line_number}: {e}")

    return cleaned_data

def estimate_num_shards(file_path, target_shard_size_gb=1):
    """Estimate the number of shards needed based on file size."""
    file_size_gb = os.path.getsize(file_path) / (1024 ** 3)  # Bytes to GB
    num_shards = max(1, math.ceil(file_size_gb / target_shard_size_gb))
    return num_shards

def split_jsonl_file(input_file, output_prefix, max_size_gb=45):
    """Split large JSONL files into smaller shards."""
    file_size_gb = os.path.getsize(input_file) / (1024 ** 3)  # Convert bytes to GB
    if file_size_gb <= max_size_gb:
        return [input_file]  # No need to split if below limit

    # Calculate lines per shard
    with open(input_file, "r", encoding="utf-8") as f:
        lines = f.readlines()
    num_lines = len(lines)
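    # NOTE: readlines() above holds the entire file in memory; for inputs approaching
    # the 45 GB threshold, a line-by-line streaming split would be easier on RAM.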

    num_shards = math.ceil(file_size_gb / max_size_gb)
    lines_per_shard = math.ceil(num_lines / num_shards)

    shard_files = []
    for i in range(num_shards):
        shard_file = f"{output_prefix}_part{i+1}.jsonl"
        with open(shard_file, "w", encoding="utf-8") as f:
            f.writelines(lines[i * lines_per_shard:(i + 1) * lines_per_shard])
        shard_files.append(shard_file)

    return shard_files

def upload_large_file(file_path, repo_id, path_in_repo, repo_type="dataset"):
    """Upload large files with multi-part upload handling."""
    file_size_mb = os.path.getsize(file_path) / (1024 ** 2)  # Convert bytes to MB
    # For files above 5 MB, pass the local path directly; smaller files are uploaded from an open handle
    if file_size_mb > 5:
        upload_file(
            path_or_fileobj=file_path,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type=repo_type,
            token=True,
        )
        print(f"Uploaded '{path_in_repo}' with multi-part upload.")
    else:
        # Direct upload for smaller files
        with open(file_path, 'rb') as f:
            api = HfApi()
            api.upload_file(
                path_or_fileobj=f,
                path_in_repo=path_in_repo,
                repo_id=repo_id,
                repo_type=repo_type,
                token=True,
            )
        print(f"Uploaded '{path_in_repo}' with direct upload.")

def create_and_upload_dataset(language):
    # Define constants
    org_name = "ScandLM"
    dataset_name = f"{language}_culturax"
    repo_id = f"{org_name}/{dataset_name}"
    jsonl_file = f"{language}_culturax.jsonl"
    temp_folder = f"temp_{language}"
    jsonl_folder = os.path.join(temp_folder, "jsonl")
    data_folder = os.path.join(temp_folder, "data")
    src_folder = os.path.join(temp_folder, "src")

    # Language codes
    language_codes = {"danish": "da", "swedish": "sv", "norwegian": "no", "nynorsk": "nn"}
    language_code = language_codes.get(language, "unknown")

    # YAML front matter
    yaml_tags = (
        f"---\n"
        f"language: [{language_code}]\n"
        f"---\n\n"
        f"# {language.capitalize()} Culturax Dataset\n\n"
        f"This dataset is simply a reformatting of uonlp/CulturaX. "
        f"Some minor formatting errors have been corrected.\n\n"
        f"## Usage\n\n"
        f"```python\n"
        f"from datasets import load_dataset\n\n"
        f"dataset = load_dataset(\"ScandLM/{language}_culturax\")\n"
        f"```\n"
    )

    # Verify JSONL file
    if not os.path.exists(jsonl_file):
        raise FileNotFoundError(f"The file '{jsonl_file}' was not found.")

    # Clean data and create a temporary JSONL file
    cleaned_data = clean_jsonl_data(jsonl_file)
    os.makedirs(jsonl_folder, exist_ok=True)
    cleaned_jsonl_file = os.path.join(jsonl_folder, f"cleaned_{jsonl_file}")
    with open(cleaned_jsonl_file, "w", encoding="utf-8") as f:
        for entry in cleaned_data:
            json.dump(entry, f)
            f.write("\n")

    # Split JSONL if too large
    jsonl_shards = split_jsonl_file(cleaned_jsonl_file, os.path.join(jsonl_folder, language), max_size_gb=45)

    # Load data into Dataset
    dataset = Dataset.from_json(cleaned_jsonl_file)

    # Estimate and create Parquet shards
    num_shards = estimate_num_shards(cleaned_jsonl_file, target_shard_size_gb=1)
    print(f"Number of Parquet shards: {num_shards}")

    os.makedirs(data_folder, exist_ok=True)
    parquet_files = []
    for shard_id in range(num_shards):
        shard = dataset.shard(num_shards=num_shards, index=shard_id)
        parquet_file = os.path.join(data_folder, f"train-{shard_id:05d}-of-{num_shards:05d}.parquet")
        shard.to_parquet(parquet_file)
        parquet_files.append(parquet_file)
        print(f"Parquet file created: {parquet_file}")

    # Hub API client (authenticated calls use the locally stored token)
    api = HfApi()

    # Create dataset repo
    api.create_repo(repo_id=repo_id, repo_type="dataset", private=False, exist_ok=True)
    print(f"Dataset repository '{repo_id}' created successfully.")

    # Upload Parquet files
    for parquet_file in parquet_files:
        upload_large_file(
            file_path=parquet_file,
            repo_id=repo_id,
            path_in_repo=f"data/{os.path.basename(parquet_file)}",
        )

    # Upload JSONL shards
    for shard_file in jsonl_shards:
        upload_large_file(
            file_path=shard_file,
            repo_id=repo_id,
            path_in_repo=f"jsonl/{os.path.basename(shard_file)}",
        )

    # Upload README
    readme_path = os.path.join(temp_folder, "README.md")
    with open(readme_path, "w", encoding="utf-8") as f:
        f.write(yaml_tags)

    upload_file(
        path_or_fileobj=readme_path,
        path_in_repo="README.md",
        repo_id=repo_id,
        repo_type="dataset",
        token=True
    )
    print("README.md uploaded successfully.")

    # Upload scripts
    os.makedirs(src_folder, exist_ok=True)
    for script in ["download_culturax.py", "upload_culturax.py"]:
        if os.path.exists(script):
            upload_large_file(
                file_path=script,
                repo_id=repo_id,
                path_in_repo=f"src/{script}",
            )

    # Clean up temporary files
    if os.path.exists(readme_path):
        os.remove(readme_path)

    # Remove directories
    shutil.rmtree(temp_folder, ignore_errors=True)

    print("Dataset setup complete!")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Upload a CulturaX-based dataset to the Hugging Face Hub.")
    parser.add_argument("language", type=str, help="The language for the dataset (e.g., danish, swedish, norwegian, nynorsk).")
    args = parser.parse_args()
    create_and_upload_dataset(args.language)
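
# Example invocation (assumes <language>_culturax.jsonl is in the working directory and
# that a Hub write token is available, e.g. via `huggingface-cli login`):
#
#   python upload_culturax.py danish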