Upload folder using huggingface_hub

Files changed:
- .gitattributes +3 -0
- LICENSE +4 -0
- LICENSE-code +373 -0
- README-ufal.md +114 -0
- README.md +36 -0
- downloader.py +127 -0
- downloader_extractor.py +137 -0
- downloader_extractor_utils.py +350 -0
- gitattributes +58 -0
- requirements.txt +3 -0
- rouge_raw.py +131 -0
- sumeczech-1.0-dev.jsonl +3 -0
- sumeczech-1.0-index.jsonl.xz +3 -0
- sumeczech-1.0-oodtest.jsonl +3 -0
- sumeczech-1.0-test.jsonl +0 -0
- sumeczech-1.0-train.jsonl +3 -0
.gitattributes CHANGED
@@ -53,3 +53,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+sumeczech-1.0-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+sumeczech-1.0-oodtest.jsonl filter=lfs diff=lfs merge=lfs -text
+sumeczech-1.0-train.jsonl filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,4 @@
code for download and eval: Mozilla Public License 2.0
data: not-for-distribution

see https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-2615
LICENSE-code ADDED
@@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================

1. Definitions
--------------

1.1. "Contributor"
    means each individual or legal entity that creates, contributes to
    the creation of, or owns Covered Software.

1.2. "Contributor Version"
    means the combination of the Contributions of others (if any) used
    by a Contributor and that particular Contributor's Contribution.

1.3. "Contribution"
    means Covered Software of a particular Contributor.

1.4. "Covered Software"
    means Source Code Form to which the initial Contributor has attached
    the notice in Exhibit A, the Executable Form of such Source Code
    Form, and Modifications of such Source Code Form, in each case
    including portions thereof.

1.5. "Incompatible With Secondary Licenses"
    means

    (a) that the initial Contributor has attached the notice described
        in Exhibit B to the Covered Software; or

    (b) that the Covered Software was made available under the terms of
        version 1.1 or earlier of the License, but not also under the
        terms of a Secondary License.

1.6. "Executable Form"
    means any form of the work other than Source Code Form.

1.7. "Larger Work"
    means a work that combines Covered Software with other material, in
    a separate file or files, that is not Covered Software.

1.8. "License"
    means this document.

1.9. "Licensable"
    means having the right to grant, to the maximum extent possible,
    whether at the time of the initial grant or subsequently, any and
    all of the rights conveyed by this License.

1.10. "Modifications"
    means any of the following:

    (a) any file in Source Code Form that results from an addition to,
        deletion from, or modification of the contents of Covered
        Software; or

    (b) any new file in Source Code Form that contains any Covered
        Software.

1.11. "Patent Claims" of a Contributor
    means any patent claim(s), including without limitation, method,
    process, and apparatus claims, in any patent Licensable by such
    Contributor that would be infringed, but for the grant of the
    License, by the making, using, selling, offering for sale, having
    made, import, or transfer of either its Contributions or its
    Contributor Version.

1.12. "Secondary License"
    means either the GNU General Public License, Version 2.0, the GNU
    Lesser General Public License, Version 2.1, the GNU Affero General
    Public License, Version 3.0, or any later versions of those
    licenses.

1.13. "Source Code Form"
    means the form of the work preferred for making modifications.

1.14. "You" (or "Your")
    means an individual or a legal entity exercising rights under this
    License. For legal entities, "You" includes any entity that
    controls, is controlled by, or is under common control with You. For
    purposes of this definition, "control" means (a) the power, direct
    or indirect, to cause the direction or management of such entity,
    whether by contract or otherwise, or (b) ownership of more than
    fifty percent (50%) of the outstanding shares or beneficial
    ownership of such entity.

2. License Grants and Conditions
--------------------------------

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

(a) under intellectual property rights (other than patent or trademark)
    Licensable by such Contributor to use, reproduce, make available,
    modify, display, perform, distribute, and otherwise exploit its
    Contributions, either on an unmodified basis, with Modifications, or
    as part of a Larger Work; and

(b) under Patent Claims of such Contributor to make, use, sell, offer
    for sale, have made, import, and otherwise transfer either its
    Contributions or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:

(a) for any code that a Contributor has removed from Covered Software;
    or

(b) for infringements caused by: (i) Your and any other third party's
    modifications of Covered Software, or (ii) the combination of its
    Contributions with other software (except as part of its Contributor
    Version); or

(c) under Patent Claims infringed by Covered Software in the absence of
    its Contributions.

This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.

3. Responsibilities
-------------------

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

(a) such Covered Software must also be made available in Source Code
    Form, as described in Section 3.1, and You must inform recipients of
    the Executable Form how they can obtain a copy of such Source Code
    Form by reasonable means in a timely manner, at a charge no more
    than the cost of distribution to the recipient; and

(b) You may distribute such Executable Form under the terms of this
    License, or sublicense it under different terms, provided that the
    license for the Executable Form does not attempt to limit or alter
    the recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------

If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.

5. Termination
--------------

5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.

************************************************************************
*                                                                      *
*  6. Disclaimer of Warranty                                           *
*  -------------------------                                           *
*                                                                      *
*  Covered Software is provided under this License on an "as is"       *
*  basis, without warranty of any kind, either expressed, implied, or  *
*  statutory, including, without limitation, warranties that the       *
*  Covered Software is free of defects, merchantable, fit for a        *
*  particular purpose or non-infringing. The entire risk as to the     *
*  quality and performance of the Covered Software is with You.        *
*  Should any Covered Software prove defective in any respect, You     *
*  (not any Contributor) assume the cost of any necessary servicing,   *
*  repair, or correction. This disclaimer of warranty constitutes an   *
*  essential part of this License. No use of any Covered Software is   *
*  authorized under this License except under this disclaimer.         *
*                                                                      *
************************************************************************

************************************************************************
*                                                                      *
*  7. Limitation of Liability                                          *
*  --------------------------                                          *
*                                                                      *
*  Under no circumstances and under no legal theory, whether tort      *
*  (including negligence), contract, or otherwise, shall any           *
*  Contributor, or anyone who distributes Covered Software as          *
*  permitted above, be liable to You for any direct, indirect,         *
*  special, incidental, or consequential damages of any character      *
*  including, without limitation, damages for lost profits, loss of    *
*  goodwill, work stoppage, computer failure or malfunction, or any    *
*  and all other commercial damages or losses, even if such party      *
*  shall have been informed of the possibility of such damages. This   *
*  limitation of liability shall not apply to liability for death or   *
*  personal injury resulting from such party's negligence to the       *
*  extent applicable law prohibits such limitation. Some               *
*  jurisdictions do not allow the exclusion or limitation of           *
*  incidental or consequential damages, so this exclusion and          *
*  limitation may not apply to You.                                    *
*                                                                      *
************************************************************************

8. Litigation
-------------

Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.

9. Miscellaneous
----------------

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.

10. Versions of the License
---------------------------

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice
-------------------------------------------

This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------

This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
README-ufal.md ADDED
@@ -0,0 +1,114 @@
# SumeCzech Corpus

These are the accompanying materials of the paper:
```
@inproceedings{straka-etal-2018-sumeczech,
  title = "{S}ume{C}zech: Large {C}zech News-Based Summarization Dataset",
  author = "Straka, Milan and Mediankin, Nikita and Kocmi, Tom and
    {\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Hude{\v{c}}ek, Vojt{\v{e}}ch and Haji{\v{c}}, Jan",
  booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC}-2018)",
  month = may,
  year = "2018",
  address = "Miyazaki, Japan",
  publisher = "European Languages Resources Association (ELRA)",
}
```

## SumeCzech Download Script

To download the SumeCzech dataset, use the `downloader.py` script.
The script has several dependencies (some of them pinned to an exact
version) listed in `requirements.txt`; you can install them using
`pip3 install -r requirements.txt`.

You can start the script using `python3 downloader.py`. By default,
16 parallel processes are used to download the data (you can
override this number using the `--parallel N` option).

During download, the MD5 hash of every document's headline, abstract, and
text is checked against the official one, which makes it possible to detect
errors during download and extraction. Although not recommended, the check
can be bypassed using the `--no_verify_md5` option.

The validated documents are saved during download. If the download script
is interrupted and run again, it reuses the already processed
documents and only downloads new ones.

### Changelog:

- 13 Feb 2018: The original download script was released.

- 25 Feb 2023: An update with the following changes:
  - use the new domain https://data.commoncrawl.org of the CC download;
  - support Python 3.10 and 3.11, where `collections.Callable` was removed.
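For illustration only (not part of the repository), here is a minimal sketch of driving the download in slices from Python. It assumes `downloader.py` and `sumeczech-1.0-index.jsonl.xz` are in the working directory; the `--download_start`, `--download_end`, and `--parallel` options are the ones defined in `downloader.py` below, while the slice size is an arbitrary choice.

```python
# Illustrative sketch: run downloader.py over the index in fixed-size slices.
import lzma
import subprocess

# Count index entries to know how many documents there are to download.
with lzma.open("sumeczech-1.0-index.jsonl.xz") as index_file:
    total = sum(1 for _ in index_file)

slice_size = 100_000  # hypothetical slice size, not prescribed by the README
for start in range(0, total, slice_size):
    subprocess.run(
        ["python3", "downloader.py",
         "--download_start", str(start),
         "--download_end", str(min(start + slice_size, total)),
         "--parallel", "8"],
        check=True)
```

Because validated documents are reused on restart, interrupting and re-running such a loop only downloads what is still missing.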
## SumeCzech ROUGE_RAW Evaluation Metric

The RougeRAW metric is implemented in the `rouge_raw.py` module, which can
compute the RougeRAW-1, RougeRAW-2, and RougeRAW-L metrics either for
a single pair of documents or for a pair of corpora.

Unfortunately, a slightly different tokenization was used in the original
paper. Therefore, we provide here the results of the systems from the paper
evaluated using the `rouge_raw.py` module.

### Results for abstract-headline on test
```
            RougeRAW-1        RougeRAW-2        RougeRAW-L
Method      P    R    F       P    R    F       P    R    F
first     13.9 23.6 16.5    04.1 07.4 05.0    12.2 20.7 14.5
random    11.0 17.8 12.8    02.6 04.5 03.1    09.6 15.5 11.1
textrank  13.3 22.8 15.9    03.7 06.8 04.6    11.6 19.9 13.8
t2t       20.2 15.9 17.2    06.7 05.1 05.6    18.6 14.7 15.8
```

### Results for abstract-headline on oodtest
```
            RougeRAW-1        RougeRAW-2        RougeRAW-L
Method      P    R    F       P    R    F       P    R    F
first     13.3 26.5 16.7    04.7 10.0 06.0    11.6 23.3 14.7
random    10.6 20.7 13.1    03.2 06.9 04.1    09.3 18.2 11.5
textrank  12.8 25.9 16.3    04.5 09.6 05.7    11.3 22.7 14.2
t2t       19.4 15.1 16.3    07.1 05.2 05.7    18.1 14.1 15.2
```

### Results for text-headline on test
```
            RougeRAW-1        RougeRAW-2        RougeRAW-L
Method      P    R    F       P    R    F       P    R    F
first     07.4 13.5 08.9    01.1 02.2 01.3    06.5 11.7 07.7
random    05.9 10.3 06.9    00.5 01.0 00.6    05.2 08.9 06.0
textrank  06.0 16.5 08.3    00.8 02.3 01.1    05.0 13.8 06.9
t2t       08.8 07.0 07.5    00.8 00.6 00.7    08.1 06.5 07.0
```

### Results for text-headline on oodtest
```
            RougeRAW-1        RougeRAW-2        RougeRAW-L
Method      P    R    F       P    R    F       P    R    F
first     06.7 13.6 08.3    01.3 02.8 01.6    05.9 12.0 07.4
random    05.2 10.0 06.3    00.6 01.4 00.8    04.6 08.9 05.6
textrank  05.8 16.9 08.1    01.1 03.4 01.5    05.0 14.5 06.9
t2t       06.3 05.1 05.5    00.5 00.4 00.4    05.9 04.8 05.1
```

### Results for text-abstract on test
```
            RougeRAW-1        RougeRAW-2        RougeRAW-L
Method      P    R    F       P    R    F       P    R    F
first     13.1 17.9 14.4    01.9 02.8 02.1    08.8 12.0 09.6
random    11.7 15.5 12.7    01.2 01.7 01.3    07.7 10.3 08.4
textrank  11.1 20.8 13.8    01.6 03.1 02.0    07.1 13.4 08.9
t2t       13.2 10.5 11.3    01.2 00.9 01.0    10.2 08.1 08.7
```

### Results for text-abstract on oodtest
```
            RougeRAW-1        RougeRAW-2        RougeRAW-L
Method      P    R    F       P    R    F       P    R    F
first     11.1 17.1 12.7    01.6 02.7 01.9    07.6 11.7 08.7
random    10.1 15.1 11.4    01.0 01.7 01.2    06.9 10.3 07.8
textrank  09.8 19.9 12.5    01.5 03.3 02.0    06.6 13.3 08.4
t2t       12.5 09.4 10.3    00.8 00.6 00.6    09.8 07.5 08.1
```
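To make the P/R/F columns above concrete, the following is a deliberately simplified, illustrative computation of a RougeRAW-1-style unigram precision/recall/F1 over lowercased whitespace tokens. It is not the reference implementation in `rouge_raw.py`, which (as noted above) uses its own tokenization; the example sentences are placeholders.

```python
# Simplified unigram-overlap P/R/F sketch (illustrative; rouge_raw.py is the reference).
from collections import Counter

def unigram_prf(gold: str, system: str):
    # Naive whitespace + lowercase tokenization; the official metric tokenizes differently.
    gold_counts, system_counts = Counter(gold.lower().split()), Counter(system.lower().split())
    overlap = sum((gold_counts & system_counts).values())  # clipped unigram matches
    precision = overlap / max(sum(system_counts.values()), 1)
    recall = overlap / max(sum(gold_counts.values()), 1)
    f1 = 2 * precision * recall / max(precision + recall, 1e-9)
    return precision, recall, f1

print(unigram_prf("vláda schválila rozpočet na rok 2018",
                  "vláda schválila rozpočet"))
```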
README.md ADDED
@@ -0,0 +1,36 @@
---
license: other
license_name: mixed
license_link: LICENSE
task_categories:
- summarization
pretty_name: SumeCzech
---
# Dataset Card for SumeCzech

SumeCzech is a dataset of one million Czech news documents, each consisting of:
- a headline;
- an abstract (the visually distinguished first paragraph);
- the rest of the text.

Developed by https://ufal.mff.cuni.cz.

## Dataset Details

### Dataset Description

- **Language(s) (NLP):** Czech
- **License:** code for download and evaluation under the Mozilla Public License 2.0; data not-for-distribution

### Dataset Sources

- **Repository:** https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-2615
- **Homepage:** https://ufal.mff.cuni.cz/sumeczech
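As a usage illustration (not part of the dataset card), here is a sketch of iterating over one of the downloaded splits. It assumes the download script has already produced `sumeczech-1.0-dev.jsonl`, with one JSON document per line carrying at least the `headline`, `abstract`, and `text` fields described above.

```python
# Sketch: iterate a downloaded split and inspect the summarization fields.
import json

with open("sumeczech-1.0-dev.jsonl", encoding="utf-8") as split_file:
    for line in split_file:
        document = json.loads(line)
        headline, abstract, text = document["headline"], document["abstract"], document["text"]
        # e.g., build (text, abstract) pairs for abstractive summarization
        print(len(headline), len(abstract), len(text))
        break  # only peek at the first document in this sketch
```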
downloader.py ADDED
@@ -0,0 +1,127 @@
#!/usr/bin/env python3
#
# This file is part of SumeCzech corpus <http://hdl.handle.net/11234/1-2615>.
#
# Copyright 2018 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# Changelog:
# - 13 Feb 2018: Original release of version 1.0.
# - 25 Feb 2023: An update with the following changes:
#   - use the new domain https://data.commoncrawl.org of the CC download;
#   - support Python 3.10 and 3.11, where `collections.Callable` was removed.

import argparse
import collections
import gzip
import json
import lzma
import multiprocessing.pool
import sys
import urllib.request

# For Python 3.10+, `collections.Callable` was removed, but it is needed
# by both beautifulsoup4==4.6.0 and python-dateutil==2.6.1.
collections.Callable = collections.abc.Callable

from downloader_extractor import Extractor

DATASETS = ["train", "dev", "test", "oodtest"]

# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--download_start", default=None, type=int,
                    help="Index of the first element to download")
parser.add_argument("--download_end", default=None, type=int,
                    help="Index of the last element to download")
parser.add_argument("--index_file", default="sumeczech-1.0-index.jsonl.xz", type=str,
                    help="Name of the index file to use")
parser.add_argument("--no_verify_md5", default=False, action="store_true",
                    help="Dangerous; do not verify MD5 of the downloaded documents")
parser.add_argument("--parallel", default=16, type=int,
                    help="Number of parallel processes to use")
parser.add_argument("--output_file", default="sumeczech-1.0-{}.jsonl", type=str,
                    help="Output file name template to use")
args = parser.parse_args()

# Load the index
print("Loading the index file.", file=sys.stderr)
index = []
with lzma.LZMAFile(args.index_file, "r") as index_file:
    for line in index_file:
        index.append(json.loads(line.decode("utf-8")))

# Open the output files and load
print("Loading previously downloaded data.", file=sys.stderr)
datasets = {}
for dataset in DATASETS:
    datasets[dataset] = {
        "file": open(args.output_file.format(dataset), "a+", encoding="utf-8"),
        "md5s": set()
    }
    datasets[dataset]["file"].seek(0)
    for i, line in enumerate(datasets[dataset]["file"]):
        assert line.endswith("\n"), "The last line of {} is not properly ended".format(
            args.output_file.format(dataset))
        try:
            entry = json.loads(line)
            datasets[dataset]["md5s"].add(entry["md5"])
        except:
            raise ValueError("Cannot decode the line {} from {}".format(
                i + 1, args.output_file.format(dataset)))

# Download and extract the given entry
def download_extract(entry):
    dataset = entry["dataset"]
    if entry["md5"] in datasets[dataset]["md5s"]:
        return None

    tries = 0
    while True:
        try:
            with urllib.request.urlopen(urllib.request.Request(
                    "https://data.commoncrawl.org/{}".format(entry["filename"]),
                    headers={"Range": "bytes={}-{}".format(entry["offset"], entry["offset"] + entry["length"] - 1)})) as response:
                with gzip.GzipFile(fileobj=response) as decompressed_response:
                    entry["content"] = decompressed_response.read().decode("latin-1")
            break
        except:
            tries += 1
            if tries < 10:
                print("Error during download of entry {}, retrying".format(entry), file=sys.stderr)
            else:
                print("Too many errors during download of entry {}, aborting".format(entry), file=sys.stderr)
                raise

    extracted = Extractor.extract_document(entry)
    del entry["content"]

    correct_hash = extracted["md5"] == entry["md5"]
    if not correct_hash:
        if not args.no_verify_md5:
            assert correct_hash, "MD5 verification failed for entry {}, aborting".format(entry)
        else:
            print("MD5 verification failed for entry {}, but continuing as requested".format(entry), file=sys.stderr)

    return extracted

print("Downloading the data.", file=sys.stderr)
entries = index[args.download_start:args.download_end]
if args.parallel > 1:
    pool = multiprocessing.pool.Pool(args.parallel, initializer=lambda: sys.setrecursionlimit(1100))
    processed_entries = pool.imap(download_extract, entries)
else:
    processed_entries = map(download_extract, entries)

for i, processed_entry in enumerate(processed_entries):
    if processed_entry is not None:
        datasets[processed_entry["dataset"]]["file"].write(json.dumps(
            processed_entry, ensure_ascii=False, sort_keys=True, indent=None, separators=(", ", ": ")) + "\n")
    if (i + 1) % 1000 == 0:
        print("Downloaded {}/{} documents.".format(i + 1, len(entries)), end="\r", file=sys.stderr, flush=True)

print("All data downloaded successfully.", file=sys.stderr)
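For clarity, the core retrieval step above (fetching a single gzip-compressed WARC record from Common Crawl via an HTTP Range request) can be exercised in isolation. The sketch below is illustrative only and uses placeholder `filename`/`offset`/`length` values, which in practice come from an entry of the index file.

```python
# Sketch of the Range-request fetch used by downloader.py (placeholder index values).
import gzip
import urllib.request

entry = {"filename": "crawl-data/.../example.warc.gz",  # placeholder; real value comes from the index
         "offset": 0, "length": 1024}                    # placeholder byte range

request = urllib.request.Request(
    "https://data.commoncrawl.org/{}".format(entry["filename"]),
    headers={"Range": "bytes={}-{}".format(entry["offset"],
                                           entry["offset"] + entry["length"] - 1)})
with urllib.request.urlopen(request) as response:          # the server returns only the requested slice
    with gzip.GzipFile(fileobj=response) as decompressed:  # each WARC record is an independent gzip member
        record = decompressed.read().decode("latin-1")     # latin-1 keeps the raw bytes reversible
print(record[:200])
```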
downloader_extractor.py ADDED
@@ -0,0 +1,137 @@
#!/usr/bin/env python3
#
# This file is part of SumeCzech corpus <http://hdl.handle.net/11234/1-2615>.
#
# Copyright 2018 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import json
import hashlib

from downloader_extractor_utils import *

URL = 'url'
HEADLINE = 'headline'
ABSTRACT = 'abstract'
TEXT = 'text'
SECTION = 'section'
SUBDOMAIN = 'subdomain'
FILENAME = 'filename'
OFFSET = 'offset'
LENGTH = 'length'
PUBLISHED = 'published'
DATASET = 'dataset'

class Extractor(object):
    @staticmethod
    def extract_document(input_document):
        parsed_content = Extractor._parse(input_document)
        Extractor._add_checksum(parsed_content)
        return parsed_content

    @staticmethod
    def _parse(entry_dict):
        # Parse, ignoring exceptions
        parsed_document = Extractor._parse_entry(entry_dict)
        # normalize spacing and quotes, and additional replacements
        for section_key in [HEADLINE, ABSTRACT, TEXT]:
            parsed_document[section_key].encode()
            parsed_document[section_key] = parsed_document[section_key].replace('\r', '').replace(' ', ' ')
            parsed_document[section_key] = re.sub(r'\[celá zpráva]', ' ', parsed_document[section_key], 0, re.I)
            if section_key in [HEADLINE, ABSTRACT]: parsed_document[section_key] = parsed_document[section_key].replace('\n', ' ')
            parsed_document[section_key] = re.sub(r'[ \t\xA0\u2028]+', ' ', parsed_document[section_key].strip())
            parsed_document[section_key] = re.sub(r'[ ]*\n[ ]*', '\n', parsed_document[section_key])
            parsed_document[section_key] = re.sub(r"['`‚‘’]{1,2}", '"', parsed_document[section_key])
            parsed_document[section_key] = re.sub(r"[„“]", '"', parsed_document[section_key])
        return parsed_document

    @staticmethod
    def _parse_entry(contents):
        """Parse one commoncrawl JSON.

        Return:
        - error status
        - text
        - abstract
        - headline
        - subdomain (i.e., domaci.novinky.cz)

        More specifically:
        return status, url, abstract_len, document_len, headline, abstract, document, section, subdomain, filename
        """
        url = contents['url']
        parse = urlparse(url)
        domain = '.'.join(parse.netloc.rsplit('.', maxsplit=2)[-2:])
        subdomain = parse.netloc.replace('www.', '')
        if domain == subdomain:
            section = (parse.path.split('/') + [''])[1]
            if not section.isalnum():
                section = subdomain
        else:
            section = subdomain
        if 'blog' in section:
            section = 'blogs'

        # get domain/subdomain settings
        # some subdomains require specific settings
        # if not, use generic settings for the domain
        domain_settings = domain_settings_dict.get(subdomain, domain_settings_dict.get(domain, None))

        # parse
        # The non-UTF8 pages sometimes are encoded in UTF-8 -- try it first,
        # and fallback to the non-UTF8 encoding
        try:
            warc = contents['content'].encode('latin-1').decode('utf-8')
        except:
            warc = contents['content'].encode('latin-1').decode(domain_settings.encoding)
        html = warc.split('\r\n\r\n', maxsplit=2)[-1].replace('\r', '')
        soup = BeautifulSoup(html, 'html.parser')

        # replace br by a newline
        for br in soup('br'):
            br.replace_with('\n')

        # get headline
        headline_text = domain_settings.headline_extractor(soup)

        # get abstract
        abstract_text = domain_settings.abstract_extractor(soup)

        # get document
        document_text = domain_settings.document_extractor(soup, domain)

        published = domain_settings.date_extractor(soup)
        if published is None: published = ""

        return {URL: url,
                HEADLINE: headline_text,
                ABSTRACT: abstract_text,
                TEXT: document_text,
                SECTION: section,
                SUBDOMAIN: subdomain,
                FILENAME: contents[FILENAME],
                OFFSET: contents[OFFSET],
                LENGTH: contents[LENGTH],
                PUBLISHED: published,
                DATASET: contents[DATASET]}

    @staticmethod
    def _add_checksum(json_data):
        json_data_for_checksum = {}

        for field in ["headline", "abstract", "text", "section", "subdomain", "published", "url"]:
            json_data_for_checksum[field] = json_data[field]

        string_for_checksum = json.dumps(json_data_for_checksum,
                                         ensure_ascii=True,
                                         sort_keys=True,
                                         indent=None,
                                         separators=(",", ":"))

        json_data['md5'] = hashlib.md5(string_for_checksum.encode('utf-8')).hexdigest()

        return json_data
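The checksum computed in `_add_checksum` is what `downloader.py` compares against the index. A small re-verification helper (illustrative, simply mirroring the serialization above) could look like this:

```python
# Sketch: recompute the document checksum exactly as _add_checksum does and compare it.
import hashlib
import json

def verify_document(document):
    fields = ["headline", "abstract", "text", "section", "subdomain", "published", "url"]
    payload = json.dumps({field: document[field] for field in fields},
                         ensure_ascii=True, sort_keys=True,
                         indent=None, separators=(",", ":"))
    return hashlib.md5(payload.encode("utf-8")).hexdigest() == document["md5"]
```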
downloader_extractor_utils.py
ADDED
@@ -0,0 +1,350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
#
|
3 |
+
# This file is part of SumeCzech corpus <http://hdl.handle.net/11234/1-2615>.
|
4 |
+
#
|
5 |
+
# Copyright 2018 Institute of Formal and Applied Linguistics, Faculty of
|
6 |
+
# Mathematics and Physics, Charles University in Prague, Czech Republic.
|
7 |
+
#
|
8 |
+
# This Source Code Form is subject to the terms of the Mozilla Public
|
9 |
+
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
10 |
+
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
11 |
+
|
12 |
+
from collections import namedtuple
|
13 |
+
from datetime import datetime
|
14 |
+
import json
|
15 |
+
import re
|
16 |
+
from urllib.parse import urlparse
|
17 |
+
|
18 |
+
from bs4 import BeautifulSoup
|
19 |
+
import dateutil.parser
|
20 |
+
import dateutil.tz
|
21 |
+
import regex
|
22 |
+
|
23 |
+
DomainSettings = namedtuple("DomainSettings",
|
24 |
+
["encoding",
|
25 |
+
"l_tag", "l_class",
|
26 |
+
"document_extractor",
|
27 |
+
"abstract_extractor",
|
28 |
+
"headline_extractor",
|
29 |
+
"date_extractor"
|
30 |
+
]
|
31 |
+
)
|
32 |
+
|
33 |
+
month_mapping = {
|
34 |
+
"ledna": 1,
|
35 |
+
"ůnora": 2,
|
36 |
+
"února": 2,
|
37 |
+
"března": 3,
|
38 |
+
"dubna": 4,
|
39 |
+
"května": 5,
|
40 |
+
"června": 6,
|
41 |
+
"července": 7,
|
42 |
+
"srpna": 8,
|
43 |
+
"září": 9,
|
44 |
+
"října": 10,
|
45 |
+
"listopadu": 11,
|
46 |
+
"prosince": 12,
|
47 |
+
}
|
48 |
+
|
49 |
+
date_re = re.compile(r"(\d{1,2})\.\s([^\W\d_]+)\s(\d{4}),?\s(\d{1,2}):(\d{2})")
|
50 |
+
cet_tz = dateutil.tz.gettz("CET")
|
51 |
+
author_re = re.compile("<p[^<]*(Autor:|FOTO:).*?</p>", flags=re.DOTALL+re.MULTILINE+re.IGNORECASE)
|
52 |
+
multiple_linebreaks_re = re.compile(r"\n\s*\n", flags=re.MULTILINE)
|
53 |
+
pp_re = re.compile(r"</(h4|p)>\s*<(h4|p)>", flags=re.MULTILINE)
|
54 |
+
|
55 |
+
# regexes for heuristics to remove extra links
|
56 |
+
ctete_take_re = re.compile("Čtěte také.*", flags=re.DOTALL+re.MULTILINE+re.IGNORECASE)
|
57 |
+
|
58 |
+
caption_re = re.compile("\w\s*$", flags=re.DOTALL+re.MULTILINE)
|
59 |
+
author_re = re.compile("<p[^<]*(Autor:|FOTO:).*?</p>", flags=re.DOTALL+re.MULTILINE+re.IGNORECASE)
|
60 |
+
multiple_linebreaks_re = re.compile(r"\n\s*\n", flags=re.MULTILINE)
|
61 |
+
pp_re = re.compile(r"</(h4|p)>\s*<(h4|p)>", flags=re.MULTILINE)
|
62 |
+
|
63 |
+
### NOVINKY
|
64 |
+
novinky_starts_of_interest_re = re.compile("Nové knihy|Nová DVD|Nová CD|Premiéry|Vánoční knihy|Vánoční DVD|Vánoční CD")
|
65 |
+
|
66 |
+
### LIDOVKY
|
67 |
+
lidovky_starts_of_interest_re = re.compile(r"Mistrovství Evropy ve fotbale 2016|MS v ledním hokeji|Kryje inspekce policejní zátaras z lidí\? O šetření nehody na D1 rozhodne soud")
|
68 |
+
|
69 |
+
### DENIK
|
70 |
+
denik_starts_of_interest_re = re.compile("Právě jsme se narodila. Vaše miminka|Právě jsme se narodili|Pozvánk")
|
71 |
+
denik_abstract_fix_re = regex.compile(r"(.*?(/[\p{Lu} ,-]*?/)? ?[-–])|(/[\p{Lu} ,-]*?/)")
|
72 |
+
|
73 |
+
def parse_czech_month_date(datetime_str):
|
74 |
+
'''Parse date in format with Czech month names.
|
75 |
+
Used for:
|
76 |
+
- lidovky.cz (dates like '2. ledna 2012 19:47')
|
77 |
+
- novinky.cz (dates like 'pondělí 4. srpna 2003, 1:57')
|
78 |
+
'''
|
79 |
+
match = date_re.search(datetime_str.lower())
|
80 |
+
if match is not None:
|
81 |
+
dt = datetime(int(match.group(3)), month_mapping[match.group(2)], int(match.group(1)), # YMD
|
82 |
+
int(match.group(4)), int(match.group(5)), 0, 0, cet_tz) #HMS us timezone
|
83 |
+
return dt.strftime("%Y-%m-%dT%H:%M:%S%z")
|
84 |
+
return None
|
85 |
+
|
86 |
+
def parse_iso_date(datetime_str):
|
87 |
+
'''Parse date in ISO format.
|
88 |
+
Used for:
|
89 |
+
- denik.cz (dates like '2009-08-20T21:00:00+02:00')
|
90 |
+
- idnes.cz (dates like '2015-12-17T06:57CET')
|
91 |
+
'''
|
92 |
+
# parse date
|
93 |
+
dt = dateutil.parser.parse(datetime_str)
|
94 |
+
# normalize timezone
|
95 |
+
if dt.tzinfo is None:
|
96 |
+
dt = dt.replace(tzinfo=cet_tz)
|
97 |
+
dt = dt.astimezone(cet_tz)
|
98 |
+
# format output
|
99 |
+
return dt.strftime("%Y-%m-%dT%H:%M:%S%z")
|
100 |
+
|
101 |
+
# generic
|
102 |
+
def dont_clean(raw_text):
|
103 |
+
'''Dummy function for text that doesn't need cleaning.
|
104 |
+
'''
|
105 |
+
return raw_text
|
106 |
+
|
107 |
+
def abstract_to_text(raw_abstract):
|
108 |
+
'''Clean abstract.
|
109 |
+
'''
|
110 |
+
return raw_abstract.text.strip()
|
111 |
+
|
112 |
+
def process_text(raw_text, domain, clean_domain_specific_text):
|
113 |
+
'''Clean text.
|
114 |
+
'''
|
115 |
+
# domain-independent: remove tags not belonging to the text
|
116 |
+
for tag in raw_text(['script', 'h1', 'h2', 'h3', 'img']):
|
117 |
+
tag.extract()
|
118 |
+
|
119 |
+
# domain-specific
|
120 |
+
raw_text = clean_domain_specific_text(raw_text)
|
121 |
+
|
122 |
+
# add linebreak between paragraphs; ugly but it works
|
123 |
+
raw_text = author_re.sub('', pp_re.sub('</p>\n<p>', str(raw_text)))
|
124 |
+
cleaned_text = BeautifulSoup(raw_text, 'html.parser').text
|
125 |
+
|
126 |
+
if domain == 'denik.cz':
|
127 |
+
cleaned_text = ctete_take_re.sub('', cleaned_text)
|
128 |
+
|
129 |
+
# clean multiple linebreaks
|
130 |
+
cleaned_text = multiple_linebreaks_re.sub('\n', cleaned_text.replace(' ', '\n').strip())
|
131 |
+
return cleaned_text
|
132 |
+
|
133 |
+
def remove_headline_intro(headline):
|
134 |
+
'''Remove some non-informative headline intros'''
|
135 |
+
return re.sub(r'^(VIDEO|OBRAZEM|Autofotka týdne|Finanční poradna|Tipy na víkend' +
|
136 |
+
'|RECENZE|Český poutník|Kam o víkendu s dětmi|TEST|Tip na výlet' +
|
137 |
+
'|KOMENTÁŘ|Průzkum|S kamerou na cestách|Video týdne|Rady do zahrady' +
|
138 |
+
'|POHNUTÉ OSUDY|ANALÝZA|Test|BAZAR|Putování s BBC|Co vám uniklo|ON-LINE' +
|
139 |
+
'|Potvrzeno|ANKETA|Otázky čtenářů|Poslední slovo|Je to oficiální' +
|
140 |
+
'|GLOSA|PŘEHLEDNĚ|ROZHOVOR|Výzkum|NÁZOR|ON-LINE CHAT|Na poslední chvíli' +
|
141 |
+
'|TOP \d+ TÝDNE|Dlouhodobý test|FOTO|FOTO, VIDEO|DOKUMENT|EXKLU[SZ]IVNĚ' +
|
142 |
+
'|CO VÁS ZAUJME|ANIMACE|ON-LINE REPORTÁŽ|BYDLENÍ|SOUTĚŽ|RETRO|AUDIO' +
|
143 |
+
'|KRÁTCE|AUTOVIDEA ROKU|REPORTÁŽ|PODÍVEJTE SE|VIDEOTEST|Ukázka|TÉMA' +
|
144 |
+
'|\d+\.\s*kolo(\s+ELH)?)\s*:\s*', '', headline, flags=re.I)
|
145 |
+
|
146 |
+
def process_headline(raw_headline, clean_headline):
|
147 |
+
headline = raw_headline.text
|
148 |
+
headline = remove_headline_intro(headline)
|
149 |
+
headline = clean_headline(headline)
|
150 |
+
return headline
|
151 |
+
|
152 |
+
|
153 |
+
def generic_headline_extractor(headline_cleaner):
|
154 |
+
def f(soup):
|
155 |
+
headline_list = soup.find_all('h1')
|
156 |
+
|
157 |
+
# headline found, clean it
|
158 |
+
headline_text = process_headline(headline_list[0], headline_cleaner)
|
159 |
+
return headline_text
|
160 |
+
|
161 |
+
return f
|
162 |
+
|
163 |
+
|
164 |
+
def ceskenoviny_headline_extractor(soup):
|
165 |
+
headline_soup = soup.find('h1', itemprop='name')
|
166 |
+
headline_text = headline_soup.text.strip()
|
167 |
+
return headline_text
|
168 |
+
|
169 |
+
|
170 |
+
def generic_abstract_extractor(s_tag, s_class, abstract_cleaner):
|
171 |
+
def f(soup):
|
172 |
+
abstract_soup = soup.find(s_tag, class_=s_class)
|
173 |
+
abstract_text = abstract_cleaner(abstract_soup)
|
174 |
+
return abstract_text
|
175 |
+
|
176 |
+
return f
|
177 |
+
|
178 |
+
|
179 |
+
def ceskenoviny_abstract_extractor(soup):
|
180 |
+
# check for article
|
181 |
+
article_soup = soup.find('div', itemprop='articleBody')
|
182 |
+
abstract_soup = article_soup.find('p', itemprop='description')
|
183 |
+
if '-' in abstract_soup.text:
|
184 |
+
abstract_text = abstract_soup.text.split('-', maxsplit=1)[1].strip()
|
185 |
+
else:
|
186 |
+
abstract_text = abstract_soup.text.strip()
|
187 |
+
abstract_soup.extract()
|
188 |
+
return abstract_text
|
189 |
+
|
190 |
+
|
191 |
+
def generic_text_extractor(t_tag, t_class, text_cleaner):
|
192 |
+
def f(soup, domain):
|
193 |
+
# check for the full text
|
194 |
+
document_soup = soup.find(t_tag, class_=t_class)
|
195 |
+
document_text = process_text(document_soup, domain, text_cleaner)
|
196 |
+
return document_text
|
197 |
+
return f
|
198 |
+
|
199 |
+
|
200 |
+
def ceskenoviny_text_extractor(soup, _):
|
201 |
+
article_soup = soup.find('div', itemprop='articleBody')
|
202 |
+
for tag in article_soup(['div', 'ul', 'script', 'img']):
|
203 |
+
tag.extract()
|
204 |
+
tags = article_soup.find('p', class_='tags')
|
205 |
+
if tags is not None:
|
206 |
+
tags.extract()
|
207 |
+
|
208 |
+
# add linebreak between paragraphs; ugly but it works
|
209 |
+
raw_text = author_re.sub('', pp_re.sub('</p>\n<p>', str(article_soup)))
|
210 |
+
clean_text = BeautifulSoup(raw_text, 'html.parser').text
|
211 |
+
|
212 |
+
clean_text = multiple_linebreaks_re.sub('\n', clean_text.replace(' ', '\n').strip())
|
213 |
+
return clean_text
|
214 |
+
|
215 |
+
|
216 |
+
def ceskenoviny_date_extractor(soup):
|
217 |
+
dt_str = soup.find('span', itemprop='datePublished').text
|
218 |
+
dt = datetime.strptime(dt_str, "%d.%m.%Y, %H:%M").replace(tzinfo=cet_tz)
|
219 |
+
published = dt.strftime("%Y-%m-%dT%H:%M:%S%z")
|
220 |
+
return published
|
221 |
+
|
222 |
+
# domain-specific cleaner functions
|
223 |
+
### NOVINKY
|
224 |
+
def clean_novinky_headline(headline):
|
225 |
+
if novinky_starts_of_interest_re.match(headline) is not None and\
|
226 |
+
'-' in headline:
|
227 |
+
return headline.split('-', maxsplit=1)[1].strip()
|
228 |
+
return headline
|
229 |
+
|
230 |
+
def clean_novinky_text(raw_text):
|
231 |
+
'''Clean text from novinky.cz
|
232 |
+
'''
|
233 |
+
# photos, videos
|
234 |
+
for tag in raw_text('div', ['articlePhotos', 'articleVideo']):
|
235 |
+
tag.extract()
|
236 |
+
# picture and video descriptions
|
237 |
+
for tag in raw_text('p', ['acmDescription', 'acmAuthor']):
|
238 |
+
tag.extract()
|
239 |
+
# picture and video descriptions: authors
|
240 |
+
for tag in raw_text('p', string=author_re):
|
241 |
+
tag.extract()
|
242 |
+
# subcaptions
|
243 |
+
for tag in raw_text('h4', string=caption_re):
|
244 |
+
tag.extract()
|
245 |
+
# see also, galleries, etc.
|
246 |
+
for tag in raw_text('table', 'table_1'):
|
247 |
+
tag.extract()
|
248 |
+
|
249 |
+
return raw_text
|
250 |
+
|
251 |
+
def novinky_date(soup):
|
252 |
+
'''Example:
|
253 |
+
<p id="articleDate" class="publicDate"> čtvrtek 31. července 2003, 13:22 </p>
|
254 |
+
'''
|
255 |
+
date_soup = soup.find('p', id='articleDate')
|
256 |
+
if date_soup is None:
|
257 |
+
return None
|
258 |
+
return parse_czech_month_date(date_soup.text.split('-')[0].strip())
|
259 |
+
|
260 |
+
### LIDOVKY
|
261 |
+
def clean_lidovky_text(raw_text):
|
262 |
+
'''Clean text from lidovky.cz
|
263 |
+
'''
|
264 |
+
# see also, galleries, etc.
|
265 |
+
for tag in raw_text('table', ['not4bbtext']):
|
266 |
+
tag.extract()
|
267 |
+
for tag in raw_text('blockquote', class_='twitter-tweet'):
|
268 |
+
tag.extract()
|
269 |
+
# remove the abstract, if present
|
270 |
+
for tag in raw_text('p', ['opener']):
|
271 |
+
tag.extract()
|
272 |
+
return raw_text
|
273 |
+
|
274 |
+
def clean_lidovky_abstract(abstract):
|
275 |
+
for tag in abstract('span', 'domicil'):
|
276 |
+
tag.extract()
|
277 |
+
return abstract.text.strip()
|
278 |
+
|
279 |
+
def lidovky_date(soup):
|
280 |
+
'''Example:
|
281 |
+
<span class="time">
|
282 |
+
2. ledna 2012 19:47
|
283 |
+
</span>
|
284 |
+
'''
|
285 |
+
date_soup = soup.find('span', class_='time')
|
286 |
+
if date_soup is None:
|
287 |
+
return None
|
288 |
+
datetime_str = date_soup.text.split(',')[0].strip().replace(' ', ' ')
|
289 |
+
return parse_czech_month_date(datetime_str)
|
290 |
+
|
291 |
+
### DENIK
|
292 |
+
def clean_denik_abstract(abstract):
|
293 |
+
abstract = abstract.text.strip()
|
294 |
+
abstract = denik_abstract_fix_re.sub('', abstract.rstrip('-–')).strip()
|
295 |
+
return abstract
|
296 |
+
|
297 |
+
def denik_date(soup):
|
298 |
+
'''Example:
|
299 |
+
<meta property="article:published_time" content="2011-01-24T20:00:00+01:00">\
|
300 |
+
'''
|
301 |
+
date_soup = soup.find('meta', property="article:published_time")
|
302 |
+
if date_soup is None:
|
303 |
+
return None
|
304 |
+
return parse_iso_date(date_soup['content'])
|
305 |
+
|
306 |
+
### IDNES
|
307 |
+
def idnes_date(soup):
|
308 |
+
'''Example:
|
309 |
+
<span class="time-date" itemprop="datePublished" content="2012-07-02T15:18CET">
|
310 |
+
'''
|
311 |
+
date_soup = soup.find('span', itemprop="datePublished")
|
312 |
+
if date_soup is None:
|
313 |
+
return None
|
314 |
+
return parse_iso_date(date_soup['content'].strip())
|
315 |
+
|
316 |
+
### CESKENOVINY
|
317 |
+
|
318 |
+
### Final config dictionary
|
319 |
+
domain_settings_dict = {
|
320 |
+
'novinky.cz': DomainSettings('utf-8',
|
321 |
+
None, None,
|
322 |
+
generic_text_extractor('div','articleBody', clean_novinky_text),
|
323 |
+
generic_abstract_extractor('p', 'perex', abstract_to_text),
|
324 |
+
generic_headline_extractor(clean_novinky_headline),
|
325 |
+
novinky_date),
|
326 |
+
'lidovky.cz': DomainSettings('windows-1250',
|
327 |
+
'div', 'list-art',
|
328 |
+
generic_text_extractor('div', 'text', clean_lidovky_text),
|
329 |
+
generic_abstract_extractor(['div', 'p'], 'opener', clean_lidovky_abstract),
|
330 |
+
generic_headline_extractor(dont_clean),
|
331 |
+
lidovky_date),
|
332 |
+
'idnes.cz': DomainSettings('windows-1250',
|
333 |
+
None, None,
|
334 |
+
generic_text_extractor('div', 'text', dont_clean),
|
335 |
+
generic_abstract_extractor('div', 'opener', abstract_to_text),
|
336 |
+
generic_headline_extractor(dont_clean),
|
337 |
+
idnes_date),
|
338 |
+
'denik.cz': DomainSettings('utf-8',
|
339 |
+
None, None,
|
340 |
+
generic_text_extractor('div', 'bbtext', dont_clean),
|
341 |
+
generic_abstract_extractor('p', 'perex', clean_denik_abstract),
|
342 |
+
generic_headline_extractor(dont_clean),
|
343 |
+
denik_date),
|
344 |
+
'ceskenoviny.cz': DomainSettings('utf-8',
|
345 |
+
None, None,
|
346 |
+
ceskenoviny_text_extractor,
|
347 |
+
ceskenoviny_abstract_extractor,
|
348 |
+
ceskenoviny_headline_extractor,
|
349 |
+
ceskenoviny_date_extractor)
|
350 |
+
}
|
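Note: every entry above follows the same DomainSettings pattern: encoding, an optional wrapper tag/class pair, and the text, abstract, headline and date extractors. As a purely hypothetical sketch (the domain name and CSS classes below are invented, not part of SumeCzech), an additional source could be registered by reusing the generic helpers defined in this file:

    # Hypothetical example only: 'example.cz' and its classes are made up.
    domain_settings_dict['example.cz'] = DomainSettings(
        'utf-8',                  # page encoding
        None, None,               # no wrapper tag/class restriction
        generic_text_extractor('div', 'articleBody', dont_clean),
        generic_abstract_extractor('p', 'perex', abstract_to_text),
        generic_headline_extractor(dont_clean),
        denik_date)               # any date extractor taking a soup and returning a date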
gitattributes
ADDED
@@ -0,0 +1,58 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.lz4 filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+# Audio files - uncompressed
+*.pcm filter=lfs diff=lfs merge=lfs -text
+*.sam filter=lfs diff=lfs merge=lfs -text
+*.raw filter=lfs diff=lfs merge=lfs -text
+# Audio files - compressed
+*.aac filter=lfs diff=lfs merge=lfs -text
+*.flac filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.ogg filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+# Image files - uncompressed
+*.bmp filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.tiff filter=lfs diff=lfs merge=lfs -text
+# Image files - compressed
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
+# Sumeczech files - uncompressed and compressed
+*.jsonl filter=lfs diff=lfs merge=lfs text=auto
+*.jsonl.xz filter=lfs diff=lfs merge=lfs -text
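Note: because these filters route the large files through Git LFS, a plain clone only contains pointer files; running "git lfs install" once and "git lfs pull" inside the repository fetches the actual sumeczech-1.0-*.jsonl contents.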
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+beautifulsoup4 == 4.6.0
+python-dateutil == 2.6.1
+regex
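Note: these pins match the versions the extraction code was developed against; installing them with "pip install -r requirements.txt" in a Python 3 environment should be enough to run the downloader and extractor scripts.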
rouge_raw.py
ADDED
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+#
+# This file is part of SumeCzech corpus <http://hdl.handle.net/11234/1-2615>.
+#
+# Copyright 2018 Institute of Formal and Applied Linguistics, Faculty of
+# Mathematics and Physics, Charles University in Prague, Czech Republic.
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+class RougeRaw:
+    """Compute RougeRAW-1, RougeRAW-2, RougeRAW-L metrics."""
+
+    class FScore:
+        """F1 score representation."""
+        def __init__(self, correct, gold, system):
+            self.p = correct / system if system else 0.
+            self.r = correct / gold if gold else 0.
+            self.f = 2 * correct / (system + gold) if system + gold else 0.
+
+    def _rouge_n(self, n, gold_words, system_words):
+        """Compute Rouge-n for given words."""
+        def n_grams(n, words):
+            ngrams = {}
+            total = 0
+            for i in range(len(words) - n + 1):
+                ngram = "\t".join(words[i:i + n])
+                ngrams[ngram] = 1 + ngrams.get(ngram, 0)
+                total += 1
+            return ngrams, total
+
+        gold_ngrams, gold_total = n_grams(n, gold_words)
+        system_ngrams, system_total = n_grams(n, system_words)
+
+        intersection = 0
+        for ngram in system_ngrams:
+            intersection += min(system_ngrams[ngram], gold_ngrams.get(ngram, 0))
+
+        return self.FScore(intersection, gold_total, system_total)
+
+    def _rouge_l(self, gold_words, system_words):
+        """Compute Rouge-L for given words."""
+        lcs = [[0] * len(system_words) for _ in gold_words]
+        for r in range(len(gold_words)):
+            for s in range(len(system_words)):
+                if gold_words[r] == system_words[s]:
+                    lcs[r][s] = 1 + (lcs[r - 1][s - 1] if r and s else 0)
+                lcs[r][s] = max(lcs[r][s], lcs[r - 1][s] if r else 0)
+                lcs[r][s] = max(lcs[r][s], lcs[r][s - 1] if s else 0)
+
+        return self.FScore(lcs[-1][-1], len(gold_words), len(system_words))
+
+    def _tokenize(self, text):
+        """Tokenize given text."""
+        return re.sub(r"\s+", " ", re.sub(r"\b", " ", text, re.UNICODE), re.UNICODE).strip().split(" ")
+
+    def document(self, gold, system):
+        """Compute RougeRAW-1, RougeRAW-2, RougeRAW-L for given documents.
+
+        Each document should be a string.
+        """
+
+        assert isinstance(gold, str) and isinstance(system, str), "Expected string arguments"
+
+        lc_gold_words = [word.lower() for word in self._tokenize(gold)]
+        lc_system_words = [word.lower() for word in self._tokenize(system)]
+
+        return {
+            "1": self._rouge_n(1, lc_gold_words, lc_system_words),
+            "2": self._rouge_n(2, lc_gold_words, lc_system_words),
+            "L": self._rouge_l(lc_gold_words, lc_system_words),
+        }
+
+    def corpus(self, gold, system):
+        """Compute RougeRAW-1, RougeRAW-2, RougeRAW-L for given corpora.
+
+        Each corpus should be a collection of documents, each document a string.
+        """
+
+        assert isinstance(gold, list) and isinstance(system, list), "Expected list arguments"
+        assert len(gold) == len(system), "Given corpora should be of the same length"
+
+        rouge = {key: self.FScore(0, 0, 0) for key in ["1", "2", "L"]}
+
+        if len(gold):
+            for gold_document, system_document in zip(gold, system):
+                for key, value in self.document(gold_document, system_document).items():
+                    rouge[key].p += value.p
+                    rouge[key].r += value.r
+                    rouge[key].f += value.f
+
+            for key in rouge:
+                rouge[key].p /= len(gold)
+                rouge[key].r /= len(gold)
+                rouge[key].f /= len(gold)
+
+        return rouge
+
+
+if __name__ == "__main__":
+    import argparse
+    import json
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("gold", type=str, help="Gold jsonl file path")
+    parser.add_argument("system", type=str, help="System jsonl output file")
+    parser.add_argument("field", type=str, help="Which jsonl field to compare")
+    args = parser.parse_args()
+
+    gold = []
+    with open(args.gold, "r", encoding="utf-8") as gold_file:
+        for gold_line in gold_file:
+            gold.append(json.loads(gold_line)[args.field])
+
+    system = []
+    with open(args.system, "r", encoding="utf-8") as system_file:
+        for system_line in system_file:
+            system.append(json.loads(system_line)[args.field])
+
+    rouge = RougeRaw().corpus(gold, system)
+    print(" RougeRAW-1 RougeRAW-2 RougeRAW-L")
+    print(" P R F P R F P R F")
+    for metric in ["1", "2", "L"]:
+        print("{:04.1f} {:04.1f} {:04.1f}{}".format(
+            100 * rouge[metric].p,
+            100 * rouge[metric].r,
+            100 * rouge[metric].f,
+            "\n" if metric == "L" else " "), end="")
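Note: besides the command-line interface above (gold jsonl file, system jsonl file, and the field to compare), the class can be used directly. A minimal usage sketch with invented example summaries:

    # Minimal sketch of the RougeRaw API defined above; the strings are invented.
    from rouge_raw import RougeRaw

    gold = ["Vláda schválila nový rozpočet.", "Druhý referenční souhrn."]
    system = ["Vláda dnes schválila rozpočet.", "Jiný vygenerovaný souhrn."]

    scores = RougeRaw().corpus(gold, system)   # dict with keys "1", "2", "L"
    for metric in ["1", "2", "L"]:
        print(metric, scores[metric].p, scores[metric].r, scores[metric].f)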
sumeczech-1.0-dev.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:273022001f42e18ec8aa6d7ce2d76b1795351a01c0ec831a669f9339515b8475
+size 170242935
sumeczech-1.0-index.jsonl.xz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99f6b9a7c1ff9f2d9e1b25ada4162b2612250cba4fb3c7116aae306ef4aca208
+size 62118412
sumeczech-1.0-oodtest.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:848a735b389d6c6d901d7134174da5041cdbaad825efe442c7f1b7b71ed97d26
+size 155927183
sumeczech-1.0-test.jsonl
ADDED
The diff for this file is too large to render.
sumeczech-1.0-train.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1f9842efc0fcb392e42489223e5677fb78f6fc99421cb9dc94b089f217d1628
+size 3305682041
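Note: each split is a JSON-lines file with one article per line. A hedged reading sketch; the field names used here ("headline", "abstract") are assumptions based on the extractors in downloader_extractor_utils.py and should be checked against an actual record:

    import json

    # Hypothetical sketch: stream one split without loading it into memory at once.
    with open("sumeczech-1.0-dev.jsonl", encoding="utf-8") as split_file:
        for line in split_file:
            record = json.loads(line)
            # Assumed field names; inspect a record to confirm the real schema.
            print(record.get("headline"), "|", record.get("abstract", "")[:80])
            break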