Datasets:
CZLC
/

Modalities:
Text
Formats:
json
Languages:
Czech
Libraries:
Datasets
pandas
License:
File size: 3,391 Bytes
eea4de0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import os
import re
from typing import Dict

import jsonlines
from tqdm import tqdm

# Input: shuffled vertical-format (.vert) dump of the speeches corpus.
TARGET = ".data/speeches.vert.shuffled"


def process_vert_format(vert_content: str) -> Dict[str, str]:
    """Split a vertical-format (.vert) corpus dump into plain-text documents.

    Each ``<doc ...>`` element is one speech; each ``<block ...>`` inside it
    becomes one output entry keyed ``"{doc_id}_{block_index}"``. The value is
    a Czech metadata header (speaker, year, date, title, office, occasion)
    followed by a newline and the detokenized block text.

    Args:
        vert_content: Full text of the .vert file (one token per line, tags
            on their own lines, columns separated by tabs).

    Returns:
        Mapping from ``"{doc_id}_{block_index}"`` to the formatted document.

    Raises:
        ValueError: If a ``<doc>`` element lacks the expected metadata
            attributes.
    """
    # One <doc ...>...</doc> element per speech (non-greedy, spans newlines).
    doc_pattern = re.compile(r'<doc[^>]*>.*?</doc>', re.DOTALL)

    # Opening <doc> tag with attributes in fixed order. Only groups 1-7
    # (id .. occasion) are used for the header; the trailing attributes
    # (medium .. notes) must still be matched for the tag to be recognized.
    metadata_pattern = re.compile(
        r'<doc id="([^"]*)"\s+speaker="([^"]*)"\s+year="([^"]*)"\s+date="([^"]*)"\s+title="([^"]*)"\s+office="([^"]*)"\s+occasion="([^"]*)"\s+medium="([^"]*)"\s+source="([^"]*)"\s+link="([^"]*)"\s+altsource="([^"]*)"\s+status="([^"]*)"\s+notes="([^"]*)">'
    )

    block_pattern = re.compile(r'<block[^>]*>.*?</block>', re.DOTALL)

    # Hoisted out of the per-block loop so they are compiled exactly once.
    tag_pattern = re.compile(r'<[^>]*>')          # any leftover markup
    multi_ws = re.compile(r'\s+')                 # whitespace runs
    ws_before_punct = re.compile(r'\s+([.,!?:;])')  # space before punctuation

    processed_documents: Dict[str, str] = {}

    for doc in tqdm(doc_pattern.findall(vert_content)):
        # Guard clause: a document without metadata is a hard error.
        metadata_match = metadata_pattern.search(doc)
        if metadata_match is None:
            raise ValueError("Metadata not found in document")

        doc_id, speaker, year, date, title, office, occasion = \
            metadata_match.group(1, 2, 3, 4, 5, 6, 7)

        metadata_str = (f"Řečník: {speaker}, "
                        f"Rok: {year}, "
                        f"Datum: {date}, "
                        f"Název: {title}, "
                        f"Úřad: {office}, "
                        f"Příležitost: {occasion}, ")

        for bid, block in enumerate(block_pattern.findall(doc)):
            # First tab-separated column of each non-empty line is the token.
            tokens = [line.split("\t")[0].strip()
                      for line in block.split("\n") if line.strip() != ""]
            doc_text = " ".join(tokens)

            # Strip any remaining <...> markup that rode along as a "token".
            doc_text = tag_pattern.sub('', doc_text)

            # Collapse whitespace runs, then detokenize punctuation.
            doc_text = multi_ws.sub(' ', doc_text).strip()
            doc_text = ws_before_punct.sub(r'\1', doc_text)

            # Some blocks are empty in the source (e.g. 08A009N) — skip them.
            if not doc_text:
                continue

            processed_documents[f"{doc_id}_{bid}"] = metadata_str + "\n" + doc_text

    return processed_documents


# Read the raw vertical-format corpus. The text is Czech, so force UTF-8
# rather than relying on the platform's default encoding.
with open(TARGET, "r", encoding="utf-8") as f:
    vert_content = f.read()

# Process the content into {"{doc_id}_{block}": formatted text}.
processed_documents = process_vert_format(vert_content)

# Write every document into a single JSONL "test" split.
OF = ".data/hf_dataset/cnc_prez_prejavy/test.jsonl"
os.makedirs(os.path.dirname(OF), exist_ok=True)
with jsonlines.open(OF, "w") as writer:
    # Iterate the items view directly — no need to materialize a list copy.
    for doc_id, doc in processed_documents.items():
        writer.write({"text": doc, "id": doc_id})