Chesscorner committed on
Commit be61c57 · verified · 1 Parent(s): 292c934

Create chess_ground-targz.py

Files changed (1)
  1. chess_ground-targz.py +92 -0
chess_ground-targz.py ADDED
@@ -0,0 +1,92 @@
+ # TODO: Address all TODOs and remove all explanatory comments
+ """Chess Ground Targz dataset loading script."""
+
+ import json
+
+ import datasets
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ Dataset for extracting notations from chess scoresheets.
+ """
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = "Creative Commons Attribution 3.0"
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ # Note: a `tree/main` URL points at an HTML listing page; `resolve/main` serves the raw file.
+ _URL = "https://huggingface.co/datasets/Chesscorner/jsonl-chess-dataset/resolve/main/train.jsonl"
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class ChessGroundTargz(datasets.GeneratorBasedBuilder):
+     """Dataset for extracting notations from chess scoresheets."""
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 # These are the features of your dataset like images, labels ...
+                 "sentence": datasets.Value("string"),
+                 "option1": datasets.Value("string"),
+                 "answer": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment the
+             # supervised_keys line below and specify them. They'll be used if
+             # as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+         # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+         # Since _URL points directly at train.jsonl, the returned path is the local path of that file.
+         path = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": path,
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath):
+         # Open the file and yield one (key, example) pair per JSON line
+         with open(filepath, encoding="utf-8") as fp:
+             for idx, line in enumerate(fp):
+                 obj = json.loads(line)
+                 yield idx, obj