dgcnz committed on
Commit 16e1eec
1 Parent(s): c00968c

feat: add data
.gitignore ADDED
@@ -0,0 +1,5 @@
+ .venv
+ **/__pycache__
+
+ datasets/jhtdb/tmp
+ **/.DS_Store
README.md CHANGED
@@ -1,3 +1,35 @@
- ---
- license: mit
- ---
+ ---
+ dataset_info:
+   config_name: small_50
+   features:
+   - name: lrs
+     sequence:
+       array4_d:
+         shape:
+         - 3
+         - 4
+         - 4
+         - 4
+         dtype: float32
+   - name: hr
+     dtype:
+       array4_d:
+         shape:
+         - 3
+         - 16
+         - 16
+         - 16
+         dtype: float32
+   splits:
+   - name: train
+     num_bytes: 2220320
+     num_examples: 40
+   - name: validation
+     num_bytes: 277540
+     num_examples: 5
+   - name: test
+     num_bytes: 277540
+     num_examples: 5
+   download_size: 2645696
+   dataset_size: 2775400
+ ---
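
The dataset card above declares a single `small_50` config in which `lrs` is a window of low-resolution (3, 4, 4, 4) velocity cutouts and `hr` is the matching (3, 16, 16, 16) cutout. A minimal loading sketch, assuming the Hub repository id is `dgcnz/jhtdb` (the actual id is not shown in this commit) and that the `jhtdb.py` loading script added below is used:

```python
from datasets import load_dataset

# Hypothetical repo id; adjust to the actual Hub path of this repository.
ds = load_dataset("dgcnz/jhtdb", name="small_50", trust_remote_code=True)

sample = ds["train"][0]
print(len(sample["lrs"]))  # window_size low-res frames, e.g. 3
# sample["hr"] is a (3, 16, 16, 16) float32 velocity cutout
```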
datasets/jhtdb/small_50/metadata_test.csv ADDED
@@ -0,0 +1,6 @@
+ time_step,window_size,sx,sy,sz,ex,ey,ez,lr_factor,hr_path,lr_paths
+ 512,3,866,214,631,881,229,646,4,datasets/jhtdb/small_50/test/512_866_214_631_881_229_646_1_1_1_1.npy,"[""datasets/jhtdb/small_50/test/511_866_214_631_881_229_646_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/512_866_214_631_881_229_646_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/513_866_214_631_881_229_646_4_4_4_4.npy""]"
+ 367,3,863,412,291,878,427,306,4,datasets/jhtdb/small_50/test/367_863_412_291_878_427_306_1_1_1_1.npy,"[""datasets/jhtdb/small_50/test/366_863_412_291_878_427_306_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/367_863_412_291_878_427_306_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/368_863_412_291_878_427_306_4_4_4_4.npy""]"
+ 384,3,994,589,681,1009,604,696,4,datasets/jhtdb/small_50/test/384_994_589_681_1009_604_696_1_1_1_1.npy,"[""datasets/jhtdb/small_50/test/383_994_589_681_1009_604_696_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/384_994_589_681_1009_604_696_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/385_994_589_681_1009_604_696_4_4_4_4.npy""]"
+ 324,3,900,107,838,915,122,853,4,datasets/jhtdb/small_50/test/324_900_107_838_915_122_853_1_1_1_1.npy,"[""datasets/jhtdb/small_50/test/323_900_107_838_915_122_853_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/324_900_107_838_915_122_853_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/325_900_107_838_915_122_853_4_4_4_4.npy""]"
+ 990,3,577,844,419,592,859,434,4,datasets/jhtdb/small_50/test/990_577_844_419_592_859_434_1_1_1_1.npy,"[""datasets/jhtdb/small_50/test/989_577_844_419_592_859_434_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/990_577_844_419_592_859_434_4_4_4_4.npy"", ""datasets/jhtdb/small_50/test/991_577_844_419_592_859_434_4_4_4_4.npy""]"
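
Each metadata row pairs one high-resolution cutout (`hr_path`) with a JSON-encoded list of low-resolution cutouts (`lr_paths`) centered on the same `time_step` and spatial box (`sx..ez`). A raw-access sketch with pandas and NumPy, assuming `test.zip` has already been extracted into `datasets/jhtdb/small_50/test/`:

```python
import json

import numpy as np
import pandas as pd

meta = pd.read_csv("datasets/jhtdb/small_50/metadata_test.csv")
row = meta.iloc[0]

hr = np.load(row["hr_path"])                             # (3, 16, 16, 16)
lrs = [np.load(p) for p in json.loads(row["lr_paths"])]  # window_size arrays of (3, 4, 4, 4)
```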
datasets/jhtdb/small_50/metadata_train.csv ADDED
@@ -0,0 +1,41 @@
+ time_step,window_size,sx,sy,sz,ex,ey,ez,lr_factor,hr_path,lr_paths
+ 512,3,866,214,631,881,229,646,4,datasets/jhtdb/small_50/train/512_866_214_631_881_229_646_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/511_866_214_631_881_229_646_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/512_866_214_631_881_229_646_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/513_866_214_631_881_229_646_4_4_4_4.npy""]"
+ 367,3,863,412,291,878,427,306,4,datasets/jhtdb/small_50/train/367_863_412_291_878_427_306_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/366_863_412_291_878_427_306_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/367_863_412_291_878_427_306_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/368_863_412_291_878_427_306_4_4_4_4.npy""]"
+ 384,3,994,589,681,1009,604,696,4,datasets/jhtdb/small_50/train/384_994_589_681_1009_604_696_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/383_994_589_681_1009_604_696_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/384_994_589_681_1009_604_696_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/385_994_589_681_1009_604_696_4_4_4_4.npy""]"
+ 324,3,900,107,838,915,122,853,4,datasets/jhtdb/small_50/train/324_900_107_838_915_122_853_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/323_900_107_838_915_122_853_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/324_900_107_838_915_122_853_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/325_900_107_838_915_122_853_4_4_4_4.npy""]"
+ 990,3,577,844,419,592,859,434,4,datasets/jhtdb/small_50/train/990_577_844_419_592_859_434_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/989_577_844_419_592_859_434_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/990_577_844_419_592_859_434_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/991_577_844_419_592_859_434_4_4_4_4.npy""]"
+ 100,3,827,395,791,842,410,806,4,datasets/jhtdb/small_50/train/100_827_395_791_842_410_806_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/99_827_395_791_842_410_806_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/100_827_395_791_842_410_806_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/101_827_395_791_842_410_806_4_4_4_4.npy""]"
+ 744,3,718,147,485,733,162,500,4,datasets/jhtdb/small_50/train/744_718_147_485_733_162_500_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/743_718_147_485_733_162_500_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/744_718_147_485_733_162_500_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/745_718_147_485_733_162_500_4_4_4_4.npy""]"
+ 19,3,272,1004,412,287,1019,427,4,datasets/jhtdb/small_50/train/19_272_1004_412_287_1019_427_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/18_272_1004_412_287_1019_427_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/19_272_1004_412_287_1019_427_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/20_272_1004_412_287_1019_427_4_4_4_4.npy""]"
+ 597,3,766,1005,159,781,1020,174,4,datasets/jhtdb/small_50/train/597_766_1005_159_781_1020_174_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/596_766_1005_159_781_1020_174_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/597_766_1005_159_781_1020_174_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/598_766_1005_159_781_1020_174_4_4_4_4.npy""]"
+ 108,3,181,583,366,196,598,381,4,datasets/jhtdb/small_50/train/108_181_583_366_196_598_381_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/107_181_583_366_196_598_381_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/108_181_583_366_196_598_381_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/109_181_583_366_196_598_381_4_4_4_4.npy""]"
+ 125,3,372,155,721,387,170,736,4,datasets/jhtdb/small_50/train/125_372_155_721_387_170_736_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/124_372_155_721_387_170_736_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/125_372_155_721_387_170_736_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/126_372_155_721_387_170_736_4_4_4_4.npy""]"
+ 571,3,391,783,256,406,798,271,4,datasets/jhtdb/small_50/train/571_391_783_256_406_798_271_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/570_391_783_256_406_798_271_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/571_391_783_256_406_798_271_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/572_391_783_256_406_798_271_4_4_4_4.npy""]"
+ 216,3,245,360,972,260,375,987,4,datasets/jhtdb/small_50/train/216_245_360_972_260_375_987_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/215_245_360_972_260_375_987_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/216_245_360_972_260_375_987_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/217_245_360_972_260_375_987_4_4_4_4.npy""]"
+ 739,3,951,968,130,966,983,145,4,datasets/jhtdb/small_50/train/739_951_968_130_966_983_145_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/738_951_968_130_966_983_145_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/739_951_968_130_966_983_145_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/740_951_968_130_966_983_145_4_4_4_4.npy""]"
+ 98,3,556,187,358,571,202,373,4,datasets/jhtdb/small_50/train/98_556_187_358_571_202_373_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/97_556_187_358_571_202_373_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/98_556_187_358_571_202_373_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/99_556_187_358_571_202_373_4_4_4_4.npy""]"
+ 115,3,696,538,435,711,553,450,4,datasets/jhtdb/small_50/train/115_696_538_435_711_553_450_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/114_696_538_435_711_553_450_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/115_696_538_435_711_553_450_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/116_696_538_435_711_553_450_4_4_4_4.npy""]"
+ 640,3,981,825,306,996,840,321,4,datasets/jhtdb/small_50/train/640_981_825_306_996_840_321_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/639_981_825_306_996_840_321_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/640_981_825_306_996_840_321_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/641_981_825_306_996_840_321_4_4_4_4.npy""]"
+ 49,3,781,531,466,796,546,481,4,datasets/jhtdb/small_50/train/49_781_531_466_796_546_481_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/48_781_531_466_796_546_481_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/49_781_531_466_796_546_481_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/50_781_531_466_796_546_481_4_4_4_4.npy""]"
+ 75,3,514,52,941,529,67,956,4,datasets/jhtdb/small_50/train/75_514_52_941_529_67_956_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/74_514_52_941_529_67_956_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/75_514_52_941_529_67_956_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/76_514_52_941_529_67_956_4_4_4_4.npy""]"
+ 546,3,886,746,305,901,761,320,4,datasets/jhtdb/small_50/train/546_886_746_305_901_761_320_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/545_886_746_305_901_761_320_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/546_886_746_305_901_761_320_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/547_886_746_305_901_761_320_4_4_4_4.npy""]"
+ 944,3,57,988,946,72,1003,961,4,datasets/jhtdb/small_50/train/944_57_988_946_72_1003_961_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/943_57_988_946_72_1003_961_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/944_57_988_946_72_1003_961_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/945_57_988_946_72_1003_961_4_4_4_4.npy""]"
+ 226,3,343,516,68,358,531,83,4,datasets/jhtdb/small_50/train/226_343_516_68_358_531_83_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/225_343_516_68_358_531_83_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/226_343_516_68_358_531_83_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/227_343_516_68_358_531_83_4_4_4_4.npy""]"
+ 113,3,140,662,474,155,677,489,4,datasets/jhtdb/small_50/train/113_140_662_474_155_677_489_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/112_140_662_474_155_677_489_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/113_140_662_474_155_677_489_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/114_140_662_474_155_677_489_4_4_4_4.npy""]"
+ 411,3,616,490,355,631,505,370,4,datasets/jhtdb/small_50/train/411_616_490_355_631_505_370_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/410_616_490_355_631_505_370_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/411_616_490_355_631_505_370_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/412_616_490_355_631_505_370_4_4_4_4.npy""]"
+ 341,3,4,908,260,19,923,275,4,datasets/jhtdb/small_50/train/341_4_908_260_19_923_275_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/340_4_908_260_19_923_275_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/341_4_908_260_19_923_275_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/342_4_908_260_19_923_275_4_4_4_4.npy""]"
+ 848,3,756,223,263,771,238,278,4,datasets/jhtdb/small_50/train/848_756_223_263_771_238_278_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/847_756_223_263_771_238_278_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/848_756_223_263_771_238_278_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/849_756_223_263_771_238_278_4_4_4_4.npy""]"
+ 255,3,266,88,15,281,103,30,4,datasets/jhtdb/small_50/train/255_266_88_15_281_103_30_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/254_266_88_15_281_103_30_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/255_266_88_15_281_103_30_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/256_266_88_15_281_103_30_4_4_4_4.npy""]"
+ 422,3,999,468,890,1014,483,905,4,datasets/jhtdb/small_50/train/422_999_468_890_1014_483_905_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/421_999_468_890_1014_483_905_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/422_999_468_890_1014_483_905_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/423_999_468_890_1014_483_905_4_4_4_4.npy""]"
+ 610,3,199,141,439,214,156,454,4,datasets/jhtdb/small_50/train/610_199_141_439_214_156_454_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/609_199_141_439_214_156_454_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/610_199_141_439_214_156_454_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/611_199_141_439_214_156_454_4_4_4_4.npy""]"
+ 210,3,540,807,402,555,822,417,4,datasets/jhtdb/small_50/train/210_540_807_402_555_822_417_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/209_540_807_402_555_822_417_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/210_540_807_402_555_822_417_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/211_540_807_402_555_822_417_4_4_4_4.npy""]"
+ 70,3,958,879,843,973,894,858,4,datasets/jhtdb/small_50/train/70_958_879_843_973_894_858_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/69_958_879_843_973_894_858_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/70_958_879_843_973_894_858_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/71_958_879_843_973_894_858_4_4_4_4.npy""]"
+ 819,3,868,629,100,883,644,115,4,datasets/jhtdb/small_50/train/819_868_629_100_883_644_115_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/818_868_629_100_883_644_115_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/819_868_629_100_883_644_115_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/820_868_629_100_883_644_115_4_4_4_4.npy""]"
+ 825,3,194,816,494,209,831,509,4,datasets/jhtdb/small_50/train/825_194_816_494_209_831_509_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/824_194_816_494_209_831_509_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/825_194_816_494_209_831_509_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/826_194_816_494_209_831_509_4_4_4_4.npy""]"
+ 453,3,17,646,361,32,661,376,4,datasets/jhtdb/small_50/train/453_17_646_361_32_661_376_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/452_17_646_361_32_661_376_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/453_17_646_361_32_661_376_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/454_17_646_361_32_661_376_4_4_4_4.npy""]"
+ 4,3,110,87,47,125,102,62,4,datasets/jhtdb/small_50/train/4_110_87_47_125_102_62_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/3_110_87_47_125_102_62_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/4_110_87_47_125_102_62_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/5_110_87_47_125_102_62_4_4_4_4.npy""]"
+ 342,3,272,316,297,287,331,312,4,datasets/jhtdb/small_50/train/342_272_316_297_287_331_312_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/341_272_316_297_287_331_312_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/342_272_316_297_287_331_312_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/343_272_316_297_287_331_312_4_4_4_4.npy""]"
+ 41,3,410,302,434,425,317,449,4,datasets/jhtdb/small_50/train/41_410_302_434_425_317_449_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/40_410_302_434_425_317_449_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/41_410_302_434_425_317_449_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/42_410_302_434_425_317_449_4_4_4_4.npy""]"
+ 324,3,769,420,926,784,435,941,4,datasets/jhtdb/small_50/train/324_769_420_926_784_435_941_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/323_769_420_926_784_435_941_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/324_769_420_926_784_435_941_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/325_769_420_926_784_435_941_4_4_4_4.npy""]"
+ 598,3,386,980,837,401,995,852,4,datasets/jhtdb/small_50/train/598_386_980_837_401_995_852_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/597_386_980_837_401_995_852_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/598_386_980_837_401_995_852_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/599_386_980_837_401_995_852_4_4_4_4.npy""]"
+ 561,3,927,904,862,942,919,877,4,datasets/jhtdb/small_50/train/561_927_904_862_942_919_877_1_1_1_1.npy,"[""datasets/jhtdb/small_50/train/560_927_904_862_942_919_877_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/561_927_904_862_942_919_877_4_4_4_4.npy"", ""datasets/jhtdb/small_50/train/562_927_904_862_942_919_877_4_4_4_4.npy""]"
datasets/jhtdb/small_50/metadata_val.csv ADDED
@@ -0,0 +1,6 @@
+ time_step,window_size,sx,sy,sz,ex,ey,ez,lr_factor,hr_path,lr_paths
+ 512,3,866,214,631,881,229,646,4,datasets/jhtdb/small_50/val/512_866_214_631_881_229_646_1_1_1_1.npy,"[""datasets/jhtdb/small_50/val/511_866_214_631_881_229_646_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/512_866_214_631_881_229_646_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/513_866_214_631_881_229_646_4_4_4_4.npy""]"
+ 367,3,863,412,291,878,427,306,4,datasets/jhtdb/small_50/val/367_863_412_291_878_427_306_1_1_1_1.npy,"[""datasets/jhtdb/small_50/val/366_863_412_291_878_427_306_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/367_863_412_291_878_427_306_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/368_863_412_291_878_427_306_4_4_4_4.npy""]"
+ 384,3,994,589,681,1009,604,696,4,datasets/jhtdb/small_50/val/384_994_589_681_1009_604_696_1_1_1_1.npy,"[""datasets/jhtdb/small_50/val/383_994_589_681_1009_604_696_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/384_994_589_681_1009_604_696_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/385_994_589_681_1009_604_696_4_4_4_4.npy""]"
+ 324,3,900,107,838,915,122,853,4,datasets/jhtdb/small_50/val/324_900_107_838_915_122_853_1_1_1_1.npy,"[""datasets/jhtdb/small_50/val/323_900_107_838_915_122_853_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/324_900_107_838_915_122_853_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/325_900_107_838_915_122_853_4_4_4_4.npy""]"
+ 990,3,577,844,419,592,859,434,4,datasets/jhtdb/small_50/val/990_577_844_419_592_859_434_1_1_1_1.npy,"[""datasets/jhtdb/small_50/val/989_577_844_419_592_859_434_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/990_577_844_419_592_859_434_4_4_4_4.npy"", ""datasets/jhtdb/small_50/val/991_577_844_419_592_859_434_4_4_4_4.npy""]"
datasets/jhtdb/small_50/test.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f601e08c5b36fd2966f82ce201b943aed1964a79e52d5d9260c8efdcc12f8182
+ size 262950
datasets/jhtdb/small_50/train.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83a53edfcac9a88ed6b80ad7fe26735f9b96c5b311d17c604c63fd88609ac173
+ size 2103236
datasets/jhtdb/small_50/val.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f601e08c5b36fd2966f82ce201b943aed1964a79e52d5d9260c8efdcc12f8182
+ size 262950
jhtdb.py ADDED
@@ -0,0 +1,123 @@
+ """TODO: Add a description here."""
+
+ import csv
+ import json
+ import os
+ import numpy as np
+ from pathlib import Path
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     # "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+     # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+     "small_50": {
+         "train": (
+             "datasets/jhtdb/small_50/metadata_train.csv",
+             "datasets/jhtdb/small_50/train.zip",
+         ),
+         "val": (
+             "datasets/jhtdb/small_50/metadata_val.csv",
+             "datasets/jhtdb/small_50/val.zip",
+         ),
+         "test": (
+             "datasets/jhtdb/small_50/metadata_test.csv",
+             "datasets/jhtdb/small_50/test.zip",
+         ),
+     }
+ }
+
+
+ class JHTDB(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="small_50", version=VERSION, description=""),
+     ]
+
+     DEFAULT_CONFIG_NAME = "small_50"
+
+     def _info(self):
+         if self.config.name.startswith("small"):
+             features = datasets.Features(
+                 {
+                     "lrs": datasets.Sequence(
+                         datasets.Array4D(shape=(3, 4, 4, 4), dtype="float32"),
+                     ),
+                     "hr": datasets.Array4D(shape=(3, 16, 16, 16), dtype="float32"),
+                 }
+             )
+         elif self.config.name.startswith("large"):
+             features = datasets.Features(
+                 {
+                     "lrs": datasets.Sequence(
+                         datasets.Array4D(shape=(3, 16, 16, 16), dtype="float32"),
+                     ),
+                     "hr": datasets.Array4D(shape=(3, 64, 64, 64), dtype="float32"),
+                 }
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         named_splits = {
+             "train": datasets.Split.TRAIN,
+             "val": datasets.Split.VALIDATION,
+             "test": datasets.Split.TEST,
+         }
+         return [
+             datasets.SplitGenerator(
+                 name=named_splits[split],
+                 gen_kwargs={
+                     "metadata_path": Path(metadata_path),
+                     "data_path": Path(data_path),
+                 },
+             )
+             for split, (metadata_path, data_path) in data_dir.items()
+         ]
+
+     def _generate_examples(self, metadata_path: Path, data_path: Path):
+         with open(metadata_path) as f:
+             reader = csv.DictReader(f)
+             for key, data in enumerate(reader):
+                 yield key, {
+                     "lrs": [
+                         np.load(data_path / Path(p).name)
+                         for p in json.loads(data["lr_paths"])
+                     ],
+                     "hr": np.load(data_path / Path(data["hr_path"]).name),
+                 }
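
The builder above looks files up by basename inside each extracted split archive (`data_path / Path(p).name`), so the directory prefixes in the metadata CSVs are only bookkeeping. A hedged sketch of loading the script locally and batching it with PyTorch (already a project dependency); it assumes the code is run from the repository root so the relative paths in `_URLS` resolve:

```python
import numpy as np
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader

ds = load_dataset("jhtdb.py", name="small_50", trust_remote_code=True)

def collate(batch):
    # Stack high-res targets and low-res windows into dense float32 tensors.
    hr = torch.stack([torch.as_tensor(np.asarray(x["hr"], dtype=np.float32)) for x in batch])    # (B, 3, 16, 16, 16)
    lrs = torch.stack([torch.as_tensor(np.asarray(x["lrs"], dtype=np.float32)) for x in batch])  # (B, window, 3, 4, 4, 4)
    return lrs, hr

loader = DataLoader(ds["train"], batch_size=4, collate_fn=collate)
lrs, hr = next(iter(loader))
print(lrs.shape, hr.shape)
```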
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,25 @@
+ [tool.poetry]
+ name = "jhtdb"
+ version = "0.1.0"
+ description = ""
+ authors = ["Diego Canez <[email protected]>"]
+ readme = "README.md"
+
+ [tool.poetry.dependencies]
+ python = ">=3.9,<3.12"
+ tqdm = "^4.66.4"
+ torch = "^2.3.0"
+ datasets = "^2.19.1"
+
+
+ [tool.poetry.group.dev.dependencies]
+ pyjhtdb = {git = "https://github.com/dgcnz/pyJHTDB"}
+ isort = "^5.13.2"
+ black = "^24.4.2"
+ flake8 = "^7.0.0"
+ jsonargparse = "^4.28.0"
+ pandas = "^2.2.2"
+
+ [build-system]
+ requires = ["poetry-core"]
+ build-backend = "poetry.core.masonry.api"
scripts/generate.py ADDED
@@ -0,0 +1,263 @@
+ import numpy as np
+ import json
+ from jsonargparse import CLI
+ import pandas as pd
+
+ import pyJHTDB
+ import pyJHTDB.dbinfo
+ from tqdm import tqdm
+ from pathlib import Path
+ from itertools import chain
+ import zipfile
+
+
+ def get_filename(
+     time_step: int,
+     start: np.ndarray,  # [x, y, z]
+     end: np.ndarray,  # [x, y, z]
+     step: np.ndarray,  # [x, y, z]
+     filter_width: int,
+ ):
+     """Serializes jhtdb params into a filename."""
+     return "{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_{9}_{10}.npy".format(
+         time_step,
+         start[0],
+         start[1],
+         start[2],
+         end[0],
+         end[1],
+         end[2],
+         step[0],
+         step[1],
+         step[2],
+         filter_width,
+     )
+
+
+ def download_jhtdb(
+     loader: pyJHTDB.libJHTDB,
+     time_step: int,
+     start: np.ndarray,
+     end: np.ndarray,
+     step: np.ndarray,
+     filter_width: int,
+     path: Path,
+     dataset: str = "isotropic1024coarse",
+     field: str = "u",
+ ):
+     """
+     :param loader: pyJHTDB.libJHTDB object
+     :param time_step: time step to download
+     :param start: start [x, y, z] of the cutout
+     :param end: end [x, y, z] of the cutout
+     :param step: step size of the cutout
+     :param filter_width: filter width of the cutout
+     :param path: path to save the data
+     :param dataset: dataset to download from. Default is "isotropic1024coarse"
+     :param field: velocity ("u") or pressure ("p") field
+     """
+     if not path.exists():
+         results: np.ndarray = loader.getCutout(
+             data_set=dataset,
+             field=field,
+             time_step=time_step,
+             start=start,
+             end=end,
+             step=step,
+             filter_width=filter_width,
+         )
+         if results is None:
+             raise Exception("Could not download data from JHTDB")
+         results = np.rollaxis(
+             results, -1, 0
+         )  # Move the [x, y, z] dimensions to the front
+         np.save(path, results)
+     return np.load(path)
+
+
+ def download_all(params: list[dict], loader: pyJHTDB.libJHTDB):
+     """Download all the data from the JHTDB database.
+     TODO: parallelize this function
+     """
+     for p in tqdm(params):
+         download_jhtdb(loader=loader, **p)
+
+
+ def get_params(
+     total_samples: int,
+     domain_size: int,
+     lr_factor: int,
+     time_range: list[int],
+     window_size: int,
+ ) -> tuple[list[list[dict]], list[dict]]:
+     dt = np.arange(window_size) - window_size // 2
+     time_steps_hr = np.random.randint(time_range[0], time_range[1], size=total_samples)
+     # reshape time_steps to (total_samples, len(dt)) so that for each i we can get time_steps[i] + dt[j]
+     time_steps_lr = np.repeat(time_steps_hr[:, np.newaxis], len(dt), axis=1) + dt
+     # time_steps.shape = [total_samples, window_size]
+     starts = np.random.randint(1, 1024 - domain_size, size=(total_samples, 3))
+     ends = starts + domain_size - 1
+     all_params_lr = [
+         [
+             {
+                 "time_step": time_steps_lr[i, j],
+                 "start": starts[i],
+                 "end": ends[i],
+                 "step": np.full(3, lr_factor, dtype=int),
+                 "filter_width": lr_factor,
+             }
+             for j in range(len(dt))
+         ]
+         for i in range(total_samples)
+     ]
+     all_params_hr = [
+         {
+             "time_step": time_steps_hr[i],
+             "start": starts[i],
+             "end": ends[i],
+             "step": np.ones(3, dtype=int),
+             "filter_width": 1,
+         }
+         for i in range(total_samples)
+     ]
+     return all_params_lr, all_params_hr
+
+
+ def download_generic(
+     total_samples: int,
+     domain_size: int,
+     lr_factor: int,
+     time_range: tuple[int, int],
+     window_size: int,
+     tmp_data_dir: Path,
+     token: str,
+ ):
+     """Download all the data from the JHTDB database."""
+     # initialize runner
+     lJHTDB = pyJHTDB.libJHTDB()
+     lJHTDB.initialize()
+     lJHTDB.add_token(token)
+     tmp_data_dir.mkdir(parents=True, exist_ok=True)
+
+     all_params_lr, all_params_hr = get_params(
+         total_samples, domain_size, lr_factor, time_range, window_size
+     )
+     # add path to all the params
+     all_params_hr = [
+         dict(p, path=tmp_data_dir / get_filename(**p)) for p in all_params_hr
+     ]
+     all_params_lr = [
+         [dict(p, path=tmp_data_dir / get_filename(**p)) for p in lr]
+         for lr in all_params_lr
+     ]
+
+     # all_params_lr.shape = [total_samples, window_size]
+     # all_params_hr.shape = [total_samples]
+     # flatten nested params
+     all_params = list(chain.from_iterable(all_params_lr)) + all_params_hr
+     download_all(all_params, lJHTDB)
+     return all_params_lr, all_params_hr, all_params
+
+
+ def make_jhtdb_dataset(
+     name: str,
+     total_samples: int = 128,
+     train_split: float = 0.8,
+     val_split: float = 0.1,
+     test_split: float = 0.1,
+     domain_size: int = 64,
+     lr_factor: int = 4,
+     root: Path = Path("dataset/jhtdb"),
+     time_range: tuple[int, int] = (2, 1023),
+     window_size: int = 3,
+     seed: int = 123,
+     token: str = "edu.jhu.pha.turbulence.testing-201311",
+ ) -> None:
+     """Creates low and high res dataset from JHTDB database.
+
+     Where:
+         low_res.shape = [nr_samples, 3, domain_size / lr_factor, domain_size / lr_factor, domain_size / lr_factor]
+         high_res.shape = [nr_samples, 3, domain_size, domain_size, domain_size]
+     And 3 corresponds to the x, y, z components of the velocity field.
+
+     Make a dataset from the JHTDB database.
+     :param: name: name of the dataset
+     :param: total_samples: total number of samples to generate
+     :param: train_split: percentage of samples to use for training
+     :param: val_split: percentage of samples to use for validation
+     :param: test_split: percentage of samples to use for testing
+     :param: domain_size: size of the domain to generate
+     :param: lr_factor: factor to downsample the data
+     :param: root: root directory to store the dataset
+     :param: time_range: range of time steps to sample from
+     :param: seed: seed to generate the dataset
+     :param: window_size: size of the window to sample from
+     :param: token: token to access the JHTDB database
+     :return: None; cutouts are cached under ``root/tmp`` and the split zips plus metadata CSVs are written under ``root/name``
+     """
+     assert window_size % 2 == 1, "Window size must be odd"
+     assert time_range[0] - window_size // 2 >= 1, "Time step out of range"
+     assert time_range[1] + window_size // 2 <= 1024, "Time step out of range"
+     assert time_range[0] >= 1 and time_range[1] <= 1024, "Time step out of range"
+
+     np.random.seed(seed)
+     # download all the data
+     tmp_data_dir = root / "tmp"
+     all_params_lr, all_params_hr, _ = download_generic(
+         total_samples,
+         domain_size,
+         lr_factor,
+         time_range,
+         window_size,
+         tmp_data_dir,
+         token,
+     )
+     assert len(all_params_lr) == len(all_params_hr), "Length mismatch"
+
+     # split the data
+     cur_root = root / name
+     cur_root.mkdir(parents=True, exist_ok=True)
+     splits_ratios = [("train", train_split), ("val", val_split), ("test", test_split)]
+
+     for split, split_ratio in splits_ratios:
+         # split data
+         split_dir = cur_root / split
+         split_params_lr = all_params_lr[: int(total_samples * split_ratio)]
+         split_params_hr = all_params_hr[: int(total_samples * split_ratio)]
+
+         # compress all the data to a zip
+         split_paths = [
+             p["path"]
+             for p in split_params_hr + list(chain.from_iterable(split_params_lr))
+         ]
+         with zipfile.ZipFile(cur_root / f"{split}.zip", "w") as z:
+             for p in split_paths:
+                 z.write(p, p.name)
+
+         # create metadata
+         metadata = []
+         for lr, hr in zip(split_params_lr, split_params_hr):
+             metadata.append(
+                 {
+                     "time_step": hr["time_step"],
+                     "window_size": window_size,
+                     "sx": hr["start"][0],
+                     "sy": hr["start"][1],
+                     "sz": hr["start"][2],
+                     "ex": hr["end"][0],
+                     "ey": hr["end"][1],
+                     "ez": hr["end"][2],
+                     "lr_factor": lr_factor,
+                     "hr_path": str(split_dir / hr["path"].name),
+                     "lr_paths": json.dumps(
+                         [str(split_dir / p["path"].name) for p in lr]
+                     ),
+                 }
+             )
+
+         metadata_df = pd.DataFrame(metadata)
+         metadata_df.to_csv(cur_root / f"metadata_{split}.csv", index=False)
+
+
+ if __name__ == "__main__":
+     CLI(make_jhtdb_dataset)
scripts/large_100.yaml ADDED
@@ -0,0 +1,14 @@
+ total_samples: 100
+ train_split: 0.8
+ val_split: 0.1
+ test_split: 0.1
+ domain_size: 64
+ lr_factor: 4
+ root: "datasets/jhtdb"
+ name: "large_100"
+ time_range:
+ - 2
+ - 1023
+ window_size: 3
+ seed: 123
+ token: "edu.jhu.pha.turbulence.testing-201311"
scripts/large_50.yaml ADDED
@@ -0,0 +1,14 @@
+ total_samples: 50
+ train_split: 0.8
+ val_split: 0.1
+ test_split: 0.1
+ domain_size: 64
+ lr_factor: 4
+ root: "datasets/jhtdb"
+ name: "large_50"
+ time_range:
+ - 2
+ - 1023
+ window_size: 3
+ seed: 123
+ token: "edu.jhu.pha.turbulence.testing-201311"
scripts/small_100.yaml ADDED
@@ -0,0 +1,14 @@
+ total_samples: 100
+ train_split: 0.8
+ val_split: 0.1
+ test_split: 0.1
+ domain_size: 16
+ lr_factor: 4
+ root: "datasets/jhtdb"
+ name: "small_100"
+ time_range:
+ - 2
+ - 1023
+ window_size: 3
+ seed: 123
+ token: "edu.jhu.pha.turbulence.testing-201311"
scripts/small_50.yaml ADDED
@@ -0,0 +1,14 @@
+ name: small_50
+ total_samples: 50
+ train_split: 0.8
+ val_split: 0.1
+ test_split: 0.1
+ domain_size: 16
+ lr_factor: 4
+ root: "datasets/jhtdb"
+ time_range:
+ - 2
+ - 1023
+ window_size: 3
+ seed: 123
+ token: "edu.jhu.pha.turbulence.testing-201311"
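
The YAML files above hold the arguments of `make_jhtdb_dataset` in `scripts/generate.py`, presumably passed through its jsonargparse CLI. An equivalent programmatic sketch for the `small_50` config (it requires the `pyjhtdb` dev dependency and network access to JHTDB, and assumes it is run from the repository root):

```python
import sys
from pathlib import Path

sys.path.insert(0, "scripts")  # make scripts/generate.py importable
from generate import make_jhtdb_dataset

make_jhtdb_dataset(
    name="small_50",
    total_samples=50,
    train_split=0.8,
    val_split=0.1,
    test_split=0.1,
    domain_size=16,
    lr_factor=4,
    root=Path("datasets/jhtdb"),
    time_range=(2, 1023),
    window_size=3,
    seed=123,
    token="edu.jhu.pha.turbulence.testing-201311",
)
```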