vldsavelyev committed
Commit 2d3e792 · Parent(s): 2253c00

Move parsing fb2s to the self._generate_examples func. Use self.base_path to find fb2s in the repo
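In effect, _split_generators now only lists the fb2 files under self.base_path (the root of the dataset repo) and reserves the smallest one for the test split, while parsing is deferred to _generate_examples, which yields one example per book. A minimal usage sketch, assuming the script and the data/*.fb2 files live together in the vldsavelyev/murakami Hub repo as the module docstring states:

    import datasets

    # Loading the repo runs this script's Builder: _split_generators() globs
    # data/*.fb2, then _generate_examples() parses each file lazily per split.
    dset = datasets.load_dataset("vldsavelyev/murakami")
    print(dset)  # expected: a DatasetDict with "train" and "test" splits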

Files changed (1):
  murakami.py (+91 −120)
murakami.py CHANGED
@@ -1,5 +1,6 @@
 """
-Parse all paragraphs from all *.fb2 files in the input directory, create a Huggingface Dataset and push it to the Hub as `vldsavelyev/murakami`.
+Parse all paragraphs from all *.fb2 files in the input directory, create a Huggingface Dataset and push it
+to the Hub as `vldsavelyev/murakami`.
 """
 
 
@@ -7,14 +8,13 @@ import os
 from pathlib import Path
 from lxml import etree
 import datasets
-from datasets import Dataset
-from huggingface_hub import create_repo
 
 datasets.logging.set_verbosity_info()
 
 
 _DESCRIPTION = """\
-Russian translations of Murakami novels, to fine-tune a generative language model. Source is FB2 files from http://flibusta.is/a/8570.
+Russian translations of Murakami novels, to fine-tune a generative language model. Source is FB2 files
+from http://flibusta.is/a/8570.
 """
 
 
@@ -23,20 +23,7 @@ class Builder(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("1.1.0")
 
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-
     def _info(self):
-        # This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
@@ -44,9 +31,95 @@ class Builder(datasets.GeneratorBasedBuilder):
             features=datasets.Features({"text": datasets.Value("string")}),
         )
 
+    def _split_generators(self, dl_manager: datasets.DownloadManager):
+        data_dir = Path(self.base_path) / "data"
+        fb2_paths = list(data_dir.glob("*.fb2"))
+        if len(fb2_paths) > 0:
+            print(f"Found {len(fb2_paths)} fb2 files in {data_dir}")
+        else:
+            raise ValueError(f"No fb2 files found in {data_dir}")
+
+        smallest_path = min(fb2_paths, key=os.path.getsize)
+        print(f"Using smallest title as a test set: {smallest_path}")
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepaths": [p for p in fb2_paths if p != smallest_path],
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepaths": [smallest_path],
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepaths):
+        for fileidx, filepath in enumerate(filepaths):
+            print(fileidx, filepath)
+            title, text = self._extract_text_from_fb2(filepath, fileidx)
+            yield title, {"text": text}
+
+    @staticmethod
+    def _extract_text_from_fb2(filepath: Path, fileidx: int) -> tuple[str, str]:
+        """
+        Parse FB2 file and return the concatenation of its paragraphs, along with the title.
+        """
+        # Load the FB2 format file
+        with filepath.open("rb") as file:
+            fb2_data = file.read()
+
+        # Print structure of the FB2 format file
+        # print(etree.tostring(etree.fromstring(fb2_data), pretty_print=True))
+
+        # Parse the FB2 format file using lxml
+        root = etree.fromstring(fb2_data)
+
+        # Get the title of the book
+        title = root.xpath(
+            "//fb:title-info/fb:book-title",
+            namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
+        )[0].text
+        print(title)
+
+        # Get all book paragraphs
+        paragraphs = root.xpath(
+            "//fb:p",
+            namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
+        )
+
+        # UNCOMMENT THE LINE BELOW TO BUILD `START_PARAGRAPHS`:
+        # self.helper_to_find_first_paragraphs(paragraphs, title, fileidx)
+
+        found_paragraphs = []
+        skipping = True
+        for pi, p in enumerate(paragraphs):
+            if p.text is None:
+                continue
+            if (
+                fileidx in Builder.START_PARAGRAPHS
+                and pi >= Builder.START_PARAGRAPHS[fileidx]
+            ):
+                skipping = False
+            if skipping and p.text.lower() == title.lower():
+                skipping = False
+            if not skipping:
+                found_paragraphs.append(p)
+        print(f"Found {len(found_paragraphs)} paragraphs")
+        text = ""
+        for p in found_paragraphs:
+            text += p.text.replace("\xa0", " ") + "\n"
+        text += "\n"
+        return title, text
+
     # Number of initial <p> element to take from each fb2, by number. This allows to skip
     # intros and other junk in the beginning of an fb2. This is built semi-manually using
-    # the `helper_to_find_first_paragraphs` func.
+    # the `self.helper_to_find_first_paragraphs` function.
     START_PARAGRAPHS = {
         3: 5,
         6: 27,
@@ -88,105 +161,3 @@ class Builder(datasets.GeneratorBasedBuilder):
             print("❌")
             for i, p in enumerate(list(paragraphs)[:30]):
                 print(f"   {i} {p.text}")
-
-    def _split_generators(self, dl_manager):
-        # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-        data_dir = "data"
-        text_by_name = {}
-        fb2s = list(Path(data_dir).glob("*.fb2"))
-        if len(fb2s) > 0:
-            print(f"Found {len(fb2s)} fb2 files in {data_dir}")
-        else:
-            raise ValueError(f"No fb2 files found in {data_dir}")
-
-        for bi, path in enumerate(fb2s):
-            print(bi, path)
-
-            # Load the FB2 format file
-            with path.open("rb") as file:
-                fb2_data = file.read()
-
-            # Print structure of the FB2 format file
-            # print(etree.tostring(etree.fromstring(fb2_data), pretty_print=True))
-
-            # Parse the FB2 format file using lxml
-            root = etree.fromstring(fb2_data)
-
-            # Get the title of the book
-            title = root.xpath(
-                "//fb:title-info/fb:book-title",
-                namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
-            )[0].text
-            print(title)
-
-            # Get all book paragraphs
-            paragraphs = root.xpath(
-                "//fb:p",
-                namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
-            )
-
-            # UNCOMMENT THE LINE BELOW TO BUILD `START_PARAGRAPHS`:
-            # self.helper_to_find_first_paragraphs(paragraphs, title, bi)
-
-            found_paragraphs = []
-            skipping = True
-            for pi, p in enumerate(paragraphs):
-                if p.text is None:
-                    continue
-                if (
-                    bi in Builder.START_PARAGRAPHS
-                    and pi >= Builder.START_PARAGRAPHS[bi]
-                ):
-                    skipping = False
-                if skipping and p.text.lower() == title.lower():
-                    skipping = False
-                if not skipping:
-                    found_paragraphs.append(p)
-            print(f"Found {len(found_paragraphs)} paragraphs")
-
-            text_by_name[title] = ""
-            for p in found_paragraphs:
-                text_by_name[title] += p.text.replace("\xa0", " ") + "\n"
-            text_by_name[title] += "\n"
-
-        print("Novel by size:")
-        for title, text in text_by_name.items():
-            print(f"  {title}: {len(text):,} characters")
-
-        smallest_title = min(text_by_name, key=lambda k: len(text_by_name[k]))
-        print(
-            f"Using smallest novel {smallest_title} "
-            f"({len(text_by_name[smallest_title]):,} characters) as a test set"
-        )
-
-        test_titles = [smallest_title]
-        train_titles = [t for t in text_by_name if t not in test_titles]
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "titles": train_titles,
-                    "texts": [text_by_name[t] for t in train_titles],
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "titles": test_titles,
-                    "texts": [text_by_name[t] for t in test_titles],
-                    "split": "test",
-                },
-            ),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, titles, texts, split):
-        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        for title, text in zip(titles, texts):
-            yield title, {"text": text}