
cvejoski committed on
Commit 0425370 · verified · 1 Parent(s): 8e65b81

Upload folder using huggingface_hub

.DS_Store ADDED
Binary file (8.2 kB)
 
README.md CHANGED
@@ -1,7 +0,0 @@
- ---
- license: mit
- task_categories:
- - time-series-forecasting
- size_categories:
- - n<1K
- ---
data/.DS_Store ADDED
Binary file (8.2 kB)
 
data/DFR/.DS_Store ADDED
Binary file (8.2 kB)
 
data/DFR/6_st_DFR_V=0.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:175de0763dbdb48a1778ad4990cfa60fc9a9a514d3ceecd4b10e2e0fb3791ba8
+ size 262531
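
All four archives in this commit are stored with Git LFS, so the repository itself holds only small pointer files like the one above; the actual bytes are resolved through the sha256 oid. A minimal sketch for checking a downloaded copy against that oid, using only the Python standard library (the local file location is an assumption for illustration):

import hashlib

def lfs_oid(path, chunk_size=1 << 20):
    # A Git LFS oid is simply the SHA-256 digest of the file contents.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# oid copied from the pointer file above; the zip is assumed downloaded to the working directory.
assert lfs_oid("6_st_DFR_V=0.zip") == "175de0763dbdb48a1778ad4990cfa60fc9a9a514d3ceecd4b10e2e0fb3791ba8"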
data/DFR/6_st_DFR_V=1.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f03231707d2c563d190fb86eb018198f85763693f68549fc151afae11c16bf2f
+ size 253358
data/DFR/6_st_DFR_V=2.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da839ea50fce818abe7880644f8ebb27e54bcbfe96c17fd7398bd3db2e01f4d8
+ size 236738
data/DFR/6_st_DFR_V=3.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fdda83c5dfcd119c0baf4b3125487de13170d67a5da81b83c47943f4d1c32ab
+ size 223701
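
The remaining archives follow the same pointer format, differing only in oid and size. In line with the commit message ("Upload folder using huggingface_hub"), an individual archive can also be fetched with the same library — a sketch, where the repo_id is a placeholder for this dataset repository:

from huggingface_hub import hf_hub_download

# repo_id is a placeholder; repo_type="dataset" is needed because the
# file lives in a dataset repo rather than a model repo.
local_zip = hf_hub_download(
    repo_id="<user>/<dataset-repo>",
    filename="data/DFR/6_st_DFR_V=0.zip",
    repo_type="dataset",
)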
mjp.py ADDED
@@ -0,0 +1,175 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Collection of datasets for the MJP."""
+
+ import pathlib
+ from collections import defaultdict
+ from dataclasses import dataclass
+ from typing import Optional
+
+ import datasets
+ import torch
+
+ from fim.data.utils import load_file
+ from fim.typing import Path, Paths
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ # _URLS = {
+ #     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+ #     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ # }
+
+ _ROOT_URL = "data/DFR"
+
+
+ @dataclass
+ class MJPDatasetsBuilderConfig(datasets.BuilderConfig):
+     """MJPDatasets builder config."""
+
+     file_name: Optional[str] = None
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class MJP(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
+     BUILDER_CONFIG_CLASS = MJPDatasetsBuilderConfig
+
+     # You will be able to load one or the other configurations in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         MJPDatasetsBuilderConfig(
+             name="DFR_V=0",
+             file_name="6_st_DFR_V=0.zip",
+             version=VERSION,
+             description="This part of my dataset covers a first domain",
+         ),
+         MJPDatasetsBuilderConfig(
+             name="DFR_V=1",
+             file_name="6_st_DFR_V=1.zip",
+             version=VERSION,
+             description="This part of my dataset covers a first domain",
+         ),
+         MJPDatasetsBuilderConfig(
+             name="DFR_V=2",
+             file_name="6_st_DFR_V=2.zip",
+             version=VERSION,
+             description="This part of my dataset covers a first domain",
+         ),
+         MJPDatasetsBuilderConfig(
+             name="DFR_V=3",
+             file_name="6_st_DFR_V=3.zip",
+             version=VERSION,
+             description="This part of my dataset covers a first domain",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "DFR_V=0"
+
+     files_to_load = {
+         "observation_times": "fine_grid_grid.pt",
+         "observation_values": "fine_grid_noisy_sample_paths.pt",
+         "time_normalization_factors": "fine_grid_time_normalization_factors.pt",
+         "sequence_lengths": "fine_grid_mask_seq_lengths.pt",
+         "ground_truth_intensity_matrices": "fine_grid_intensity_matrices.pt",
+         "adjacency_matrices": "fine_grid_adjacency_matrices.pt",
+         "ground_truth_initial_distributions": "fine_grid_initial_distributions.pt",
+     }
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "observation_times": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("float32")))),
+                 "observation_values": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("uint32")))),
+                 "time_normalization_factors": datasets.Value("float32"),
+                 "sequence_lengths": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
+                 "ground_truth_intensity_matrices": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                 "adjacency_matrices": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                 "ground_truth_initial_distributions": datasets.Sequence(datasets.Value("uint64")),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         urls = f"{_ROOT_URL}/{self.config.file_name}"
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"datadir": pathlib.Path(data_dir) / self.config.file_name.split(".")[0]},
+             )
+         ]
+
+     def __get_files(self, path: Path) -> Paths:
+         files_to_load = [(key, pathlib.Path(path) / file_name) for key, file_name in self.files_to_load.items()]
+         return files_to_load
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, datadir):
+         data = defaultdict(list)
+         files_to_load = self.__get_files(datadir)
+         for key, file_path in files_to_load:
+             data[key].append(load_file(file_path))
+         for k, v in data.items():
+             data[k] = torch.cat(v)
+             print(k, data[k].shape)
+         for id in range(len(data["observation_times"])):
+             yield id, {k: v[id].tolist() for k, v in data.items() if k in self.info.features}
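
Taken together, the script defines four configurations (DFR_V=0 through DFR_V=3), each backed by one of the LFS archives above and exposing a single TRAIN split. A minimal loading sketch, mirroring the load_dataset comments in the script itself; the repository id is a placeholder, trust_remote_code is required on recent datasets versions for script-based datasets, and the fim package imported by mjp.py must be installed:

import datasets

ds = datasets.load_dataset(
    "<user>/<dataset-repo>",   # placeholder repo id
    "DFR_V=0",                 # any name from BUILDER_CONFIGS above
    trust_remote_code=True,
)
train = ds["train"]    # the only split defined in _split_generators
print(train.features)  # observation_times, observation_values, ...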