"""EU Debates"""

import json
import os
import textwrap

import datasets


# BibTeX entry for the paper introducing the corpus.
# FIX: the year was "2021", contradicting the NAACL 2024 booktitle, the June /
# Mexico City venue, and the "-2024" citation key; corrected to 2024.
MAIN_CITATION = """
@inproceedings{chalkidis-and-brandl-eu-llama-2024,
    title = "Llama meets EU: Investigating the European political spectrum through the lens of LLMs",
    author = "Chalkidis, Ilias  and
      Stephanie Brandl",
    booktitle = "Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics",
    month = jun,
    year = "2024",
    address = "Mexico City, Mexico",
    publisher = "Association for Computational Linguistics",
}
"""

# Human-readable summary of the corpus (approx. 87k EU Parliament speeches,
# 2009-2023, with speaker/debate metadata and machine translations).
_DESCRIPTION = """
EU Debates is a corpus of parliamentary proceedings (debates) from the EU parliament. The corpus consists of approx. 87k individual speeches in the period 2009-2023. 
We exhaustively scrape the data from the official European Parliament Plenary website. All speeches are time-stamped, thematically organized on debates, 
and include metadata relevant to the speaker's identity (full name, euro-party affiliation, speaker role), and the debate (date and title). 
Older debate speeches are originally in English, while newer ones are linguistically diverse across the 23 official EU languages, thus we also provide machine-translated 
versions in English, when official translations are missing. 
"""
# Base URL of the dataset repository on the Hugging Face Hub; archive paths
# (e.g. eu_debates.zip) are resolved relative to this.
MAIN_PATH = 'https://huggingface.co/datasets/coastalcph/eu_debates/resolve/main'


class EUDebatesConfig(datasets.BuilderConfig):
    """BuilderConfig for EU Debates."""

    def __init__(
        self,
        data_url,
        citation,
        **kwargs,
    ):
        """BuilderConfig for EU Debates.

        Args:
          data_url: `string`, url to download the zip file from
          citation: `string`, BibTeX citation for the dataset
          **kwargs: keyword arguments forwarded to super.
        """
        # FIX: the docstring documented a nonexistent `data_file` parameter
        # and omitted `citation`; corrected above.
        # Pin the schema version; remaining kwargs (name, etc.) go to the
        # base BuilderConfig.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.data_url = data_url
        self.citation = citation


class EUDebates(datasets.GeneratorBasedBuilder):
    """EU Debates. Version 1.0"""

    BUILDER_CONFIGS = [
        EUDebatesConfig(
            name="eu_debates",
            data_url=os.path.join(MAIN_PATH, "eu_debates.zip"),
            citation=textwrap.dedent(MAIN_CITATION),
        ),
    ]

    def _info(self):
        """Return dataset metadata: schema, description, homepage, citation."""
        # Every field is stored as a plain string, including `year`.
        features = {"text": datasets.Value("string"),
                    "translated_text": datasets.Value("string"),
                    "speaker_party": datasets.Value("string"),
                    "speaker_role": datasets.Value("string"),
                    "speaker_name": datasets.Value("string"),
                    "debate_title": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "year": datasets.Value("string")}
        return datasets.DatasetInfo(
            # FIX: the original read `self.config.description`, which nothing
            # in this file ever sets (the config only receives name/data_url/
            # citation), so the description was empty; use the module-level
            # _DESCRIPTION, which was defined for exactly this purpose.
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage='https://www.europarl.europa.eu/',
            citation=MAIN_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive; declare the single train split."""
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs from a JSONL file, keeping MEP speeches.

        Records whose `speaker_role` is not 'MEP' are skipped, so the yielded
        ids (input line numbers) need not be contiguous.
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                if data['speaker_role'] != 'MEP':
                    continue
                yield id_, {
                    # `text`/`translated_text` may be absent; default to None.
                    "text": data.get("text"),
                    "translated_text": data.get("translated_text"),
                    "speaker_party": data["speaker_party"],
                    "speaker_role": data["speaker_role"],
                    "speaker_name": data["speaker_name"],
                    "debate_title": data["debate_title"],
                    "date": data["date"],
                    "year": data["year"],
                }