Datasets:

DOI:
License:
File size: 1,297 Bytes
f04c17e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# %%
import multiprocessing as mp
import os
from glob import glob

import h5py
import numpy as np
import pandas as pd
from tqdm import tqdm

# %%
# Input and output directories for the waveform HDF5 files.
data_path = "waveform_h5"
result_path = "waveform_h5"
# Deterministic ordering so repeated runs process files in the same sequence.
file_list = sorted(glob(os.path.join(data_path, "*.h5")))
# %%
# Size of each input file in gigabytes, keyed by its path.
file_size = {path: os.path.getsize(path) / 1e9 for path in file_list}

# %%
MAX_SIZE = 45  # GB — any file larger than this is split into multiple parts

for file, size in file_size.items():
    if size <= MAX_SIZE:
        # Small enough already: leave the file in place untouched.
        continue

    # Number of parts needed so that each part stays (roughly) under MAX_SIZE.
    NUM_FILES = int(np.ceil(size / MAX_SIZE))
    base = os.path.basename(file).replace(".h5", "")

    with h5py.File(file, "r") as f:
        event_ids = list(f.keys())
        # Open every output part exactly once, instead of re-opening a file
        # handle for every single event (the original opened per event, which
        # is O(events) open/close calls on large archives).
        outputs = {
            i: h5py.File(f"{result_path}/{base}_{i}.h5", "a")
            for i in range(NUM_FILES)
        }
        try:
            for event_id in tqdm(event_ids, desc=f"Processing {file}"):
                # Route each event to a part by its id's last character.
                # NOTE(review): assumes event ids end in a digit — TODO confirm.
                index = int(event_id[-1]) % NUM_FILES
                if event_id in outputs[index]:
                    # Already copied by a previous (partial) run; h5py's
                    # Group.copy would raise on an existing name, so skip
                    # to make re-runs resume-safe.
                    continue
                # Recursively copy the whole event group into the part file.
                f.copy(event_id, outputs[index])
        finally:
            for part in outputs.values():
                part.close()