---
dataset_info:
- config_name: eng-ces.both_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: ces_context
    dtype: string
  - name: ces
    dtype: string
  splits:
  - name: train
    num_bytes: 99249281542
    num_examples: 16312023
  download_size: 50311612769
  dataset_size: 99249281542
- config_name: eng-ces.src_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: ces
    dtype: string
  splits:
  - name: train
    num_bytes: 55783391633
    num_examples: 18718104
  download_size: 27949833416
  dataset_size: 55783391633
- config_name: eng-ces.trg_contexts
  features:
  - name: eng
    dtype: string
  - name: ces_context
    dtype: string
  - name: ces
    dtype: string
  splits:
  - name: train
    num_bytes: 67790203254
    num_examples: 21000099
  download_size: 35682681930
  dataset_size: 67790203254
- config_name: eng-deu.both_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: deu_context
    dtype: string
  - name: deu
    dtype: string
  splits:
  - name: train
    num_bytes: 544626482766
    num_examples: 92066559
  download_size: 287393903524
  dataset_size: 544626482766
- config_name: eng-deu.src_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: deu
    dtype: string
  splits:
  - name: train
    num_bytes: 305555617347
    num_examples: 105641972
  download_size: 163549986986
  dataset_size: 305555617347
- config_name: eng-deu.trg_contexts
  features:
  - name: eng
    dtype: string
  - name: deu_context
    dtype: string
  - name: deu
    dtype: string
  splits:
  - name: train
    num_bytes: 355001902675
    num_examples: 110317948
  download_size: 189296787255
  dataset_size: 355001902675
- config_name: eng-fra.both_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: fra_context
    dtype: string
  - name: fra
    dtype: string
  splits:
  - name: train
    num_bytes: 426893899212
    num_examples: 72236079
  download_size: 230871109132
  dataset_size: 426893899212
- config_name: eng-fra.src_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: fra
    dtype: string
  splits:
  - name: train
    num_bytes: 249628324881
    num_examples: 83450135
  download_size: 137168157896
  dataset_size: 249628324881
- config_name: eng-fra.trg_contexts
  features:
  - name: eng
    dtype: string
  - name: fra_context
    dtype: string
  - name: fra
    dtype: string
  splits:
  - name: train
    num_bytes: 270469945796
    num_examples: 86300028
  download_size: 146946754213
  dataset_size: 270469945796
- config_name: eng-pol.both_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: pol_context
    dtype: string
  - name: pol
    dtype: string
  splits:
  - name: train
    num_bytes: 89716407201
    num_examples: 14889498
  download_size: 46321869504
  dataset_size: 89716407201
- config_name: eng-pol.src_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: pol
    dtype: string
  splits:
  - name: train
    num_bytes: 49301775564
    num_examples: 16803950
  download_size: 25270022217
  dataset_size: 49301775564
- config_name: eng-pol.trg_contexts
  features:
  - name: eng
    dtype: string
  - name: pol_context
    dtype: string
  - name: pol
    dtype: string
  splits:
  - name: train
    num_bytes: 59562532908
    num_examples: 18395174
  download_size: 31681850576
  dataset_size: 59562532908
- config_name: eng-rus.both_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: rus_context
    dtype: string
  - name: rus
    dtype: string
  splits:
  - name: train
    num_bytes: 18867292434
    num_examples: 2433874
  download_size: 9061303586
  dataset_size: 18867292434
- config_name: eng-rus.src_contexts
  features:
  - name: eng
    dtype: string
  - name: eng_context
    dtype: string
  - name: rus
    dtype: string
  splits:
  - name: train
    num_bytes: 9242442932
    num_examples: 3104195
  download_size: 4903481579
  dataset_size: 9242442932
- config_name: eng-rus.trg_contexts
  features:
  - name: eng
    dtype: string
  - name: rus_context
    dtype: string
  - name: rus
    dtype: string
  splits:
  - name: train
    num_bytes: 14244166125
    num_examples: 2813181
  download_size: 6539469805
  dataset_size: 14244166125
configs:
- config_name: eng-ces.both_contexts
  data_files:
  - split: train
    path: both_contexts/eng-ces/train-*
- config_name: eng-ces.src_contexts
  data_files:
  - split: train
    path: src_contexts/eng-ces/train-*
- config_name: eng-ces.trg_contexts
  data_files:
  - split: train
    path: trg_contexts/eng-ces/train-*
- config_name: eng-deu.both_contexts
  data_files:
  - split: train
    path: both_contexts/eng-deu/train-*
  default: true
- config_name: eng-deu.src_contexts
  data_files:
  - split: train
    path: src_contexts/eng-deu/train-*
- config_name: eng-deu.trg_contexts
  data_files:
  - split: train
    path: trg_contexts/eng-deu/train-*
- config_name: eng-fra.both_contexts
  data_files:
  - split: train
    path: both_contexts/eng-fra/train-*
- config_name: eng-fra.src_contexts
  data_files:
  - split: train
    path: src_contexts/eng-fra/train-*
- config_name: eng-fra.trg_contexts
  data_files:
  - split: train
    path: trg_contexts/eng-fra/train-*
- config_name: eng-pol.both_contexts
  data_files:
  - split: train
    path: both_contexts/eng-pol/train-*
- config_name: eng-pol.src_contexts
  data_files:
  - split: train
    path: src_contexts/eng-pol/train-*
- config_name: eng-pol.trg_contexts
  data_files:
  - split: train
    path: trg_contexts/eng-pol/train-*
- config_name: eng-rus.both_contexts
  data_files:
  - split: train
    path: both_contexts/eng-rus/train-*
- config_name: eng-rus.src_contexts
  data_files:
  - split: train
    path: src_contexts/eng-rus/train-*
- config_name: eng-rus.trg_contexts
  data_files:
  - split: train
    path: trg_contexts/eng-rus/train-*
task_categories:
- translation
language:
- en
- de
- fr
- cs
- pl
- ru
size_categories:
- 100M<n<1B
license: cc0-1.0
pretty_name: ParaCrawl_Context
---
# Dataset Card for ParaCrawl_Context

<!-- Provide a quick summary of the dataset. -->

This is a dataset for document-level machine translation introduced in the ACL 2024 paper [**Document-Level Machine Translation with Large-Scale Public Parallel Corpora**](https://aclanthology.org/2024.acl-long.712/). It consists of parallel sentence pairs from the [ParaCrawl](https://paracrawl.eu/) corpus along with the preceding context extracted from the webpages the sentences were crawled from.

## Dataset Details

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->
This dataset adds document-level context to parallel corpora released by [ParaCrawl](https://paracrawl.eu/). This is useful for training document-level (context-aware) machine translation models, for which very few large-scale public datasets exist. While the ParaCrawl project released large-scale parallel corpora at the sentence level, it did not preserve the document context of the webpages the sentences were originally extracted from. We used additional data sources to retrieve the contexts from the original web text, and thus created datasets that can be used to train document-level MT models.

- **Curated by:** Proyag Pal, Alexandra Birch, Kenneth Heafield, from data released by ParaCrawl
- **Language pairs:** eng-deu, eng-fra, eng-ces, eng-pol, eng-rus
- **License:** Creative Commons Zero v1.0 Universal (CC0)
- **Repository:** https://github.com/Proyag/ParaCrawl-Context
- **Paper:** https://aclanthology.org/2024.acl-long.712/

## Uses

<!-- Address questions around how the dataset is intended to be used. -->
This dataset is intended for document-level (context-aware) machine translation.

### Direct Use

<!-- This section describes suitable use cases for the dataset. -->
The intended use of this dataset is to treat the sentence fields as the source and target sides of the translation, and to provide the contexts as additional information to the model. This could be done, for example, with a dual-encoder model, where one encoder encodes the source sentence while the second encoder encodes the source/target context. For an example, see our associated [paper](https://aclanthology.org/2024.acl-long.712/).
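
As a rough illustration of this setup, the sketch below tokenizes the source sentence and its preceding context separately, as two inputs to a dual-encoder model. It assumes a generic Hugging Face tokenizer; the checkpoint name is only a placeholder, and the actual architecture and preprocessing used in the paper differ.

```python
# Hypothetical preprocessing sketch for a dual-encoder setup; the tokenizer
# checkpoint is a placeholder, not the one used in the paper.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-multilingual-cased")

example = {
    "eng": "This stage is 32.8 km long and can be accomplished in 8 hours and 30 minutes.",
    "eng_context": "From Terfens to Innsbruck: <docline> This stage is 24.2 kilometres long ...",
    "deu": "Der Abschnitt ist 32,8 Kilometer lang und in einer Zeit von 8 Stunden und 30 Minuten zu schaffen.",
}

# One encoder consumes the source sentence, the other its preceding context;
# the target sentence becomes the decoder's labels.
sentence_inputs = tokenizer(example["eng"], truncation=True, max_length=128)
context_inputs = tokenizer(example["eng_context"], truncation=True, max_length=512)
target_labels = tokenizer(example["deu"], truncation=True, max_length=128)
```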

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
We expect that this dataset will not work very well for the document-level translation scenario where an entire concatenated document is provided as input and a full translation is produced by the model.  
This is because of how the data was extracted (by matching sentences to their originating URLs and extracting the preceding context from those pages), which means:

* There is no guarantee that the preceding context automatically extracted from the originating URL is related to the sentence pair at all.
* Many sentences came from multiple URLs and thus multiple contexts, so source and target contexts concatenated with source and target sentences may not produce parallel "documents" at all in many cases.

However, most examples in our datasets have a unique context, so concatenation might work better if only those examples are used.

We have not validated this experimentally, and you are encouraged to try and let us know if it works!
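
If you want to experiment with this, one possible starting point is the sketch below: keep only examples with a single context on each side and concatenate context and sentence into pseudo-documents. The field names and delimiters it relies on are described under Data Fields below; this is an untested heuristic, not a pipeline from the paper.

```python
# Untested sketch: build concatenated pseudo-documents from examples that have
# exactly one context per side (field names as in the eng-deu.both_contexts config).
def to_concatenated_pair(example: dict):
    """Return (source_doc, target_doc), or None if either side has multiple contexts."""
    if "|||" in example["eng_context"] or "|||" in example["deu_context"]:
        return None  # multiple alternative contexts: skip for this use case
    src_doc = example["eng_context"].replace(" <docline> ", "\n") + "\n" + example["eng"]
    trg_doc = example["deu_context"].replace(" <docline> ", "\n") + "\n" + example["deu"]
    return src_doc, trg_doc
```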

## Dataset Structure

<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
There are three versions of the dataset for each language pair. For a language pair SRC-TRG, they are:
- `SRC-TRG.src_contexts` - which has preceding context for only the SRC side
- `SRC-TRG.trg_contexts` - which has preceding context for only the TRG side
- `SRC-TRG.both_contexts` - which has preceding context for both SRC and TRG sides
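
Each version is a separate configuration and can be loaded independently. A minimal loading sketch follows; the repository ID is an assumption about where this dataset is hosted on the Hugging Face Hub, so adjust it to the actual repository, and consider streaming given the corpus sizes.

```python
from datasets import load_dataset

# Assumed repository ID -- replace with the actual location of this dataset on the Hub.
dataset = load_dataset(
    "Proyag/ParaCrawl_Context",
    "eng-deu.src_contexts",   # <language pair>.<context variant>
    split="train",
    streaming=True,           # avoids downloading hundreds of GB up front
)
print(next(iter(dataset)).keys())  # dict_keys(['eng', 'eng_context', 'deu'])
```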

### Data Instances
Example from `eng-deu.both_contexts`:
```python
{
    'eng': 'This stage is 32.8 km long and can be accomplished in 8 hours and 30 minutes.',
    'eng_context': "Cars <docline> Glungezer chair lift <docline> Patscherkofel cable cars <docline> Service <docline> Classifications of Hiking Routes <docline> Safety in the Mountains <docline> Mountain huts and alpine restaurants <docline> Guides <docline> Sport Shops <docline> Brochures and Maps <docline> Hiking <docline> Free hiking programme <docline> Hiking <docline> Hikes <docline> Long-distance walking trails <docline> Summit Tours <docline> Family hikes <docline> Education and nature trails <docline> Nature reserves <docline> Geocaching <docline> Lifts & cable cars <docline> Axamer Lizum <docline> Innsbruck Nordkette cable cars <docline> Drei-Seen-Bahn in Kühtai <docline> Muttereralm <docline> Oberperfuss Cable Cars <docline> Glungezer chair lift <docline> Patscherkofel cable cars <docline> Service <docline> Classifications of Hiking Routes <docline> Safety in the Mountains <docline> Mountain huts and alpine restaurants <docline> Guides <docline> Sport Shops <docline> Brochures and Maps <docline> today <docline> 12°C/54°F <docline> 70% Fineweather <docline> 2500mm <docline> Frostborder <docline> Tuesday <docline> 17°C/63°F <docline> 50% Fineweather <docline> 3100mm <docline> Frostborder <docline> Wednesday <docline> 18°C/64°F <docline> 40% Fineweather <docline> 3400mm <docline> Frostborder <docline> Forecast <docline> We will see a nice start to the day with sunshine. Clouds will however gradually increase at all levels producing showers in the afternoon. <docline> Tendency <docline> Air pressure will rise over Central Europe and there will be some clearer spells at times. A period of fine weather is not forecast, however. Until Thursday, sunny spells will alternate with showers in the afternoon. <docline> Need help? Contact us! <docline> Innsbruck Tourism <docline> +43 512 / 59 850 <docline> [email protected] <docline> Mon - Fri: 8.00 am - 5.00 pm <docline> Hotel- and group reservations <docline> +43 512 / 56 2000 <docline> [email protected] <docline> Mon - Fri: 9.00 am - 5.00 pm <docline> Tourist info <docline> +43 512 / 53 56-0 <docline> [email protected] <docline> Mon - Sat: 9.00 am - 5.00 pm <docline> DE <docline> EN <docline> IT <docline> FR <docline> NL <docline> ES <docline> Hikes <docline> innsbruck.info <docline> Hiking <docline> Hiking <docline> Hikes <docline> Hike with the family, as a couple or alone, short or long, to the summit or on the flat. Search out the correct route for you around Innsbruck. The filter below is here to help. Choose the length of walk, the difficulty level, duration and much more. The results will then deliver tailor-made hiking tips for your holiday. <docline> The Tyrolean section of The Way of St. James through Innsbruck <docline> https://www.innsbruck.info/fileadmin/userdaten/contwise/poi-28003079-jakobsweg_sterbach_in_muehlau_42027886.jpg <docline> Back Overview <docline> Difficulty <docline> easy <docline> Altitude up <docline> 900 METER <docline> Max. route length <docline> 81.4 KM <docline> Best season <docline> April - October <docline> Information/food <docline> GPX Download Route to start <docline> Three of the sections along the main route of The Way of St. James pass through the Innsbruck holiday region. <docline> From Terfens to Innsbruck: <docline> This stage is 24.2 kilometres long and is possible in 6 hours and 15 minutes. The Way of St. James leads from the medieval town of Hall in Tirol via the villages of Absam and Thaur, through the market town of Rum and on to the city of Innsbruck. 
Once in Innsbruck, the route continues to St. James' Cathedral. <docline> From Innsbruck to Pfaffenhofen: <docline>",
    'deu_context': 'mit Kindern <docline> Webcams <docline> Prospekte <docline> Aktuelle Top-Themen auf Innsbruck.info <docline> Welcome Card <docline> Innsbruck Card <docline> Bräuche im Sommer <docline> Walks to explore <docline> Innsbruck Webcams <docline> Hiking <docline> Bergwanderprogramm <docline> Wandern <docline> Wanderungen <docline> Weitwanderungen <docline> Gipfeltouren <docline> Familienwanderungen <docline> Themen- und Naturlehrpfade <docline> Naturschauplätze <docline> Geocaching <docline> Bergbahnen und Lifte <docline> Axamer Lizum <docline> Innsbrucker Nordkettenbahnen <docline> Dreiseenbahn Kühtai <docline> Muttereralm <docline> Bergbahn Oberperfuss <docline> Glungezerbahn <docline> Patscherkofelbahn <docline> Service <docline> Klassifizierung der Wanderwege <docline> Sicherheit am Berg <docline> Almhütten und Bergrestaurants <docline> Bergführer und Guides <docline> Sportshops <docline> Prospekte und Karten <docline> Hiking <docline> Bergwanderprogramm <docline> Wandern <docline> Wanderungen <docline> Weitwanderungen <docline> Gipfeltouren <docline> Familienwanderungen <docline> Themen- und Naturlehrpfade <docline> Naturschauplätze <docline> Geocaching <docline> Bergbahnen und Lifte <docline> Axamer Lizum <docline> Innsbrucker Nordkettenbahnen <docline> Dreiseenbahn Kühtai <docline> Muttereralm <docline> Bergbahn Oberperfuss <docline> Glungezerbahn <docline> Patscherkofelbahn <docline> Service <docline> Klassifizierung der Wanderwege <docline> Sicherheit am Berg <docline> Almhütten und Bergrestaurants <docline> Bergführer und Guides <docline> Sportshops <docline> Prospekte und Karten <docline> Heute <docline> 18°C <docline> 30% Sonne <docline> 3610mm <docline> Frostgrenze <docline> Dienstag <docline> 17°C <docline> 50% Sonne <docline> 3100mm <docline> Frostgrenze <docline> Mittwoch <docline> 18°C <docline> 40% Sonne <docline> 3400mm <docline> Frostgrenze <docline> Vorhersage <docline> Der Tag beginnt zunächst noch recht beschaulich und die Sonne scheint. Allerdings nimmt die Bewölkung nach und nach in allen Schichten zu und am Nachmittag kommt es dann zu Schauern. <docline> Tendenz <docline> Über Mitteleuropa steigt in der Folge der Luftdruck und zeitweise lockert es auf. Dauerhaftes Schönwetter stellt sich jedoch noch nicht ein: Bis zum Donnerstag gibt es neben Sonne vor allem jeweils nachmittags auch Schauer. <docline> Können wir helfen? Kontaktieren Sie uns! <docline> Innsbruck Tourismus <docline> +43 512 / 59 850 <docline> [email protected] <docline> Mo - Fr: 8:00 - 17:00 Uhr <docline> Hotel- u. Gruppenreservierung <docline> +43 512 / 56 2000 <docline> [email protected] <docline> Mo - Fr: 9:00 - 17:00 Uhr <docline> Tourismus Information <docline> +43 512 / 53 56-0 <docline> [email protected] <docline> Mo - Sa: 9:00 - 17:00 Uhr <docline> DE <docline> EN <docline> IT <docline> FR <docline> NL <docline> ES <docline> Wanderungen <docline> innsbruck.info <docline> Wandern <docline> Wandern <docline> Wanderungen <docline> Wandern mit Familie, zu zweit oder solo, weit oder kurz, zum Gipfelkreuz oder entspannt ohne viel Steigung. Suchen Sie sich die passende Wanderung rund um Innsbruck aus. Die Filter oberhalb der Ergebnisliste helfen dabei: Wählen Sie Streckenlänge, Schwierigkeitsgrad, Gehzeit und einiges mehr. Die Ergebnisse darunter liefern maßgeschneiderte Wandertipps für Ihren Urlaub. 
<docline> Tiroler Jakobsweg durch Innsbruck <docline> https://www.innsbruck.info/fileadmin/userdaten/contwise/poi-28003079-jakobsweg_sterbach_in_muehlau_42027886.jpg <docline> Zurück Zur Übersicht <docline> Schwierigkeit <docline> leicht <docline> Höhenmeter bergauf <docline> 900 METER <docline> Streckenlänge <docline> 81.4 KM <docline> Beste Jahreszeit <docline> April bis Oktober <docline> Mit Einkehrmöglichkeit <docline> GPX Download Route zum Startpunkt <docline> Drei Abschnitte der Hauptroute des Jakobswegs verlaufen durch die Ferienregion Innsbruck. <docline> Von Terfens nach Innsbruck: <docline> In 6 Stunden 15 Minuten sind die 24,2 Kilometer dieses Abschnittes zu schaffen. Von der mittelalterlichen Stadt Hall über Absam und Thaur führt der Jakobsweg durch die Marktgemeinde Rum und weiter nach Innsbruck. Dort angelangt kommt man zum Dom St.Jakob. <docline> Von Innsbruck bis Pfaffenhofen: <docline>',
    'deu': 'Der Abschnitt ist 32,8 Kilometer lang und in einer Zeit von 8 Stunden und 30 Minuten zu schaffen.'
}
```

`eng-deu.src_contexts` will have the `eng`, `eng_context`, and `deu` fields, while `eng-deu.trg_contexts` will have the `eng`, `deu_context`, and `deu` fields.

This example has only one context on each side, but examples may have multiple alternative contexts separated by `|||` delimiters.


### Data Fields
For `SRC-TRG.src_contexts` or `SRC-TRG.trg_contexts`, there are 3 fields:
- `SRC` - containing the source (English) sentence.
- `TRG` - containing the target language sentence.
- `SRC_context` or `TRG_context` - containing the source/target context(s). There may be multiple contexts from multiple webpages separated by the delimiter `|||`. Within each context, line breaks have been replaced with a `<docline>` token.

`SRC-TRG.both_contexts` contains 4 fields, since it has both the `SRC_context` and `TRG_context` fields.

Remember to replace `SRC` and `TRG` in these examples with the actual language codes in each case. `SRC` is always `eng`, while `TRG` can be `deu`, `fra`, `ces`, `pol`, or `rus`.
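
A small helper sketch (not part of the official release) for undoing this encoding: split a context field into its alternative contexts on the `|||` delimiter and restore line breaks from the `<docline>` token.

```python
def split_contexts(context_field: str) -> list[str]:
    """Split a *_context field into alternative contexts with real line breaks."""
    contexts = context_field.split("|||")
    return [
        c.strip().replace(" <docline> ", "\n").replace("<docline>", "\n")
        for c in contexts
    ]

# e.g. split_contexts(example["deu_context"]) for an eng-deu example
```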

### Data Splits
This dataset does not contain any validation or test sets; all the provided data is intended to be used for training.

If you need document-level validation/test sets for use while training models with this data, it should be quite simple to construct them in the same format from other readily available test sets with document information such as [WMT](https://www2.statmt.org/wmt24/translation-task.html) test sets.

## Dataset Creation

### Curation Rationale

<!-- Motivation for the creation of this dataset. -->
While document-level machine translation has inherent advantages over sentence-level approaches, there are very few large-scale document-level parallel corpora available publicly. Parallel corpora constructed from web crawls often discard document context in the process of extracting sentence pairs. ParaCrawl released sentence-level parallel corpora with their source URLs, and separately also released raw web text, so we are able to match the URLs to recover the context that the sentences originally occurred in. This enables us to create large-scale parallel corpora for training document-level machine translation models.

### Source Data

<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
This dataset was extracted entirely from [parallel corpora](https://paracrawl.eu/) and [raw web text](https://paracrawl.eu/moredata) released by ParaCrawl. Please refer to the [ParaCrawl paper](https://aclanthology.org/2020.acl-main.417/) for more information about the source of the data.

#### Data Collection and Processing

<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->

To extract the contexts for ParaCrawl sentence pairs, we used the following method (copied from the [paper](https://aclanthology.org/2024.acl-long.712/)):
1. Extract the source URLs and corresponding sentences from the TMX files from [ParaCrawl release 9](https://paracrawl.eu/releases) (or the bonus release in the case of eng-rus). Each sentence is usually associated with many different source URLs, and we keep all of them.
2. Match the extracted URLs with the URLs from all the raw text data and get the corresponding base64-encoded webpage/document, if available.
3. Decode the base64 documents and try to match the original sentence. If the sentence is not found in the document, discard the document. Otherwise, keep the 512 tokens preceding the sentence (where a token is anything separated by a space), replace line breaks with a special `<docline>` token, and store it as the document context. Since some very common sentences correspond to huge numbers of source URLs, we keep a maximum of 1000 unique contexts per sentence separated by a delimiter `|||` in the final dataset.
4. Finally, we compile three different files per language pair – a dataset with all sentence pairs where we have one or more source contexts (`*.src_contexts`), one with all sentence pairs with target contexts (`*.trg_contexts`), and a third dataset with both contexts (`*.both_contexts`).
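
To illustrate step 3, here is a minimal re-implementation sketch of the context extraction for a single decoded document (not the exact code used to build the corpus; see the linked repository for that):

```python
def extract_context(document: str, sentence: str, max_tokens: int = 512):
    """Return up to `max_tokens` space-separated tokens preceding `sentence`,
    with line breaks replaced by <docline>, or None if the sentence is absent."""
    position = document.find(sentence)
    if position == -1:
        return None  # sentence not found: discard this document
    preceding_tokens = document[:position].split(" ")  # a token is anything separated by a space
    context = " ".join(preceding_tokens[-max_tokens:])
    return context.replace("\n", " <docline> ")
```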

#### Who are the source data producers?

<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->

See the [ParaCrawl paper](https://aclanthology.org/2020.acl-main.417/).

#### Personal and Sensitive Information

<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->

This dataset is constructed from web crawled data, and thus may contain sensitive or harmful data. The ParaCrawl datasets were released after some filtering at the sentence pair level, but please note that the contexts we extracted from the original webpages have not been filtered in any way.

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->
\[This section has been copied from the [paper](https://aclanthology.org/2024.acl-long.712/), which you can refer to for details.\]

**Relevance of context**: Our work assumes that any extracted text preceding a given sentence on a webpage is relevant “document context” for that sentence. However, it is likely in many cases that the extracted context is unrelated to the sentence, since most webpages are not formatted as a coherent “document”. As a result, the dataset often includes irrelevant context like lists of products, UI elements, or video titles extracted from webpages which will not be directly helpful to document-level translation models.

**Unaligned contexts**: For sentences with multiple matching contexts, the source and target contexts may not always be aligned. However, the vast majority of sentence pairs have exactly one source/target context, and should therefore have aligned contexts. We recommend filtering on this basis if aligned contexts are required.

**Language coverage**: ParaCrawl was focused on European Union languages with only a few “bonus” releases for other languages. Moreover, most of the corpora were for English-centric language pairs. Due to the high computational requirements to extract these corpora, our work further chose only a subset of these languages, resulting in corpora for only a few European languages, some of them closely related. Given the availability of raw data and tools to extract such corpora for many more languages from all over the world, we hope the community is encouraged to build such resources for a much larger variety of language pairs.

**Harmful content**: The main released corpora from ParaCrawl were filtered to remove sensitive content, particularly pornography. Due to pornographic websites typically containing large amounts of machine translated text, this filtering also improved the quality of the resulting corpora. However, when we match sentences with their source URLs, it often happens that an innocuous sentence was extracted from a webpage with harmful content, and this content is present in our document contexts. We may release filtered versions of these corpora in the future, pending further work to filter harmful content at the document level.



### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Please be aware that this contains unfiltered data from the internet, and may contain harmful content. For details about the content and limitations of this dataset, read this dataset card as well as [our paper](https://aclanthology.org/2024.acl-long.712/) before using the data for anything where the translated content or its usage might be sensitive.

## Citation

<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->

Please cite the paper if you use this dataset:

```bibtex
@inproceedings{pal-etal-2024-document,
    title = "Document-Level Machine Translation with Large-Scale Public Parallel Corpora",
    author = "Pal, Proyag  and
      Birch, Alexandra  and
      Heafield, Kenneth",
    editor = "Ku, Lun-Wei  and
      Martins, Andre  and
      Srikumar, Vivek",
    booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.acl-long.712",
    pages = "13185--13197",
}
```

## Dataset Card Authors

This dataset card was written by [Proyag Pal](https://proyag.github.io/). The [paper](https://aclanthology.org/2024.acl-long.712/) this dataset was created for was written by Proyag Pal, Alexandra Birch, and Kenneth Heafield at the University of Edinburgh.

## Dataset Card Contact

If you have any comments or questions, contact [Proyag Pal](mailto:[email protected]).