vladsaveliev committed on
Commit f281154 · 1 Parent(s): 5f3519b

Add load script

Files changed (1): murakami.py +208 -0
murakami.py ADDED
@@ -0,0 +1,208 @@
+ """
+ Parse all paragraphs from all *.fb2 files in the input directory, create a Huggingface Dataset and push it to the Hub as `vldsavelyev/murakami`.
+ """
+
+
+ import os
+ from pathlib import Path
+ from lxml import etree
+ import gdown
+ import datasets
+ from datasets import Dataset
+ from huggingface_hub import create_repo
+ import coloredlogs
+
+ coloredlogs.install(level="info")
+ datasets.logging.set_verbosity_info()
+
+
+ _DESCRIPTION = """\
+ Russian translations of Murakami novels, to fine-tune a generative language model. Source is FB2 files from http://flibusta.is/a/8570.
+ """
+
+ _URL = "https://drive.google.com/open?id=14Uw_efwj70iip1xJD2H2qHSVMuoUDm4z"
+
+
+ class Builder(datasets.GeneratorBasedBuilder):
+     """Murakami novels, translated to Russian."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+
+     def _info(self):
+         # This method specifies the datasets.DatasetInfo object, which contains information and typings for the dataset
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=datasets.Features({"text": datasets.Value("string")}),
+         )
+
+     # Index of the first <p> element to take from each fb2 file, keyed by book number.
+     # This allows skipping intros and other junk at the beginning of an fb2. Built
+     # semi-manually using the `helper_to_find_first_paragraphs` func.
+     START_PARAGRAPHS = {
+         3: 5,
+         6: 27,
+         7: 3,
+         9: 4,
+         10: 3,
+         12: 11,
+         18: 5,
+         20: 3,
+         21: 5,
+     }
+
+     @staticmethod
+     def helper_to_find_first_paragraphs(paragraphs, title, book_number, n=30):
+         """
+         Helps to eyeball the first few paragraphs of a book to skip junk paragraphs
+         in the beginning and manually construct the `START_PARAGRAPHS` dict.
+         """
+         found_paragraphs = []
+         skipping = True
+         for i, p in enumerate(list(paragraphs)[:n]):
+             if p.text is None:
+                 continue
+             if (
+                 book_number in Builder.START_PARAGRAPHS
+                 and i >= Builder.START_PARAGRAPHS[book_number]
+             ):
+                 skipping = False
+             if skipping and p.text.lower() == title.lower():
+                 skipping = False
+             if not skipping:
+                 found_paragraphs.append(f" {i} {p.text}")
+
+         if found_paragraphs:
+             print("✅")
+             print("\n".join(found_paragraphs))
+
+         else:
+             print("❌")
+             for i, p in enumerate(list(paragraphs)[:30]):
+                 print(f" {i} {p.text}")
+
+     def _split_generators(self, dl_manager):
+         # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         data_dir = dl_manager.extract(
+             dl_manager.download_custom(
+                 _URL,
+                 custom_download=lambda src, dst: gdown.download(src, dst, fuzzy=True),
+             )
+         )
+
+         text_by_name = {}
+
+         fb2s = list(Path(data_dir).glob("*.fb2"))
+         if len(fb2s) > 0:
+             print(f"Found {len(fb2s)} fb2 files in {data_dir}")
+         else:
+             raise ValueError(f"No fb2 files found in {data_dir}")
+
+         for bi, path in enumerate(fb2s):
+             print(bi, path)
+
+             # Load the FB2 format file
+             with path.open("rb") as file:
+                 fb2_data = file.read()
+
+             # Print structure of the FB2 format file
+             # print(etree.tostring(etree.fromstring(fb2_data), pretty_print=True))
+
+             # Parse the FB2 format file using lxml
+             root = etree.fromstring(fb2_data)
+
+             # Get the title of the book
+             title = root.xpath(
+                 "//fb:title-info/fb:book-title",
+                 namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
+             )[0].text
+             print(title)
+
+             # Get all book paragraphs
+             paragraphs = root.xpath(
+                 "//fb:p",
+                 namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
+             )
+
+             # UNCOMMENT THE LINE BELOW TO BUILD `START_PARAGRAPHS`:
+             # self.helper_to_find_first_paragraphs(paragraphs, title, bi)
+
+             found_paragraphs = []
+             skipping = True
+             for pi, p in enumerate(paragraphs):
+                 if p.text is None:
+                     continue
+                 if (
+                     bi in Builder.START_PARAGRAPHS
+                     and pi >= Builder.START_PARAGRAPHS[bi]
+                 ):
+                     skipping = False
+                 if skipping and p.text.lower() == title.lower():
+                     skipping = False
+                 if not skipping:
+                     found_paragraphs.append(p)
+             print(f"Found {len(found_paragraphs)} paragraphs")
+
+             text_by_name[title] = ""
+             for p in found_paragraphs:
+                 text_by_name[title] += p.text.replace("\xa0", " ") + "\n"
+             text_by_name[title] += "\n"
+
+         print("Novels by size:")
+         for title, text in text_by_name.items():
+             print(f" {title}: {len(text):,} characters")
+
+         smallest_title = min(text_by_name, key=lambda k: len(text_by_name[k]))
+         print(
+             f"Using smallest novel {smallest_title} "
+             f"({len(text_by_name[smallest_title]):,} characters) as a test set"
+         )
+
+         test_titles = [smallest_title]
+         train_titles = [t for t in text_by_name if t not in test_titles]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "titles": train_titles,
+                     "texts": [text_by_name[t] for t in train_titles],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "titles": test_titles,
+                     "texts": [text_by_name[t] for t in test_titles],
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, titles, texts, split):
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         for title, text in zip(titles, texts):
+             yield title, {"text": text}
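Once this script is on the Hub, the dataset can be pulled back with the standard `datasets` API. Below is a minimal usage sketch, not part of the commit: the repo id `vldsavelyev/murakami` comes from the module docstring, the split names come from `_split_generators`, and the `text` column from `_info`; note that newer `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets.

import datasets

# Load the script-based dataset from the Hub; this executes murakami.py under the hood.
murakami = datasets.load_dataset("vldsavelyev/murakami")

# One example per novel, with the full novel text in the "text" column.
print(murakami["train"].num_rows, "train novels")
print(murakami["test"][0]["text"][:300])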