Create test.py
test.py
ADDED
@@ -0,0 +1,54 @@
import datasets
import pyarrow.parquet as pq

_CITATION = ''

_DESCRIPTION = ''

_HOMEPAGE = ''

_LICENSE = ''

_BASE_URL = 'https://huggingface.co/datasets/AresEkb/test/resolve/main/'

_FEATURES = {
    'domains': datasets.Features({
        'reg_number': datasets.Value('string'),
        'standard_name': datasets.Value('string'),
        'name': datasets.Value('string'),
        'purpose': datasets.Value('string'),
        'embeddings': datasets.Sequence(datasets.Value('float32')),
    }),
}

class ProfStandardsDatasetBuilder(datasets.ArrowBasedBuilder):

    VERSION = datasets.Version('0.0.1')

    BUILDER_CONFIGS = [
        datasets.BuilderConfig('domains', VERSION),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES[self.config.name],
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        url = _BASE_URL + self.config.name + '.parquet'
        file_path = dl_manager.download(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'file_path': file_path},
            ),
        ]

    def _generate_tables(self, file_path):
        if file_path.startswith(_BASE_URL):
            file_path = file_path[len(_BASE_URL):]
        yield self.config.name, pq.read_table(file_path)
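
A script like this is normally consumed through datasets.load_dataset rather than run directly. The snippet below is a minimal usage sketch, assuming the script lives in the AresEkb/test repository referenced by _BASE_URL and that a domains.parquet file has been uploaded there; trust_remote_code=True is how recent versions of the datasets library opt in to running a custom loading script from the Hub.

    import datasets

    # Assumed usage, not part of this commit: load the 'domains' config
    # defined by test.py as a train split backed by domains.parquet.
    ds = datasets.load_dataset(
        'AresEkb/test',
        'domains',
        split='train',
        trust_remote_code=True,
    )
    print(ds.features)
    print(len(ds))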