Datasets: CZLC /
Modalities: Text
Formats: json
Languages: Czech
Libraries: Datasets, pandas
License:
mfajcik committed (verified)
Commit eea4de0 · 1 Parent(s): 2cf3e7f

Upload 2 files

Files changed (2)
  1. convert_speeches.py +91 -0
  2. test.jsonl +0 -0
convert_speeches.py ADDED
@@ -0,0 +1,91 @@
+ TARGET = ".data/speeches.vert.shuffled"
+ 
+ import os
+ import re
+ from typing import Dict
+ 
+ import jsonlines
+ from tqdm import tqdm
+ 
+ 
+ def process_vert_format(vert_content: str) -> Dict[str, str]:
+     doc_pattern = re.compile(r'<doc[^>]*>.*?</doc>', re.DOTALL)
+ 
+     # Pattern to match document boundaries and extract metadata
+     metadata_pattern = re.compile(
+         r'<doc id="([^"]*)"\s+speaker="([^"]*)"\s+year="([^"]*)"\s+date="([^"]*)"\s+title="([^"]*)"\s+office="([^"]*)"\s+occasion="([^"]*)"\s+medium="([^"]*)"\s+source="([^"]*)"\s+link="([^"]*)"\s+altsource="([^"]*)"\s+status="([^"]*)"\s+notes="([^"]*)">'
+     )
+ 
+     block_pattern = re.compile(r'<block[^>]*>.*?</block>', re.DOTALL)
+ 
+     # Pattern to remove whitespace before punctuation
+     ws_before_punct = re.compile(r'\s+([.,!?:;])')
+ 
+     # Find all documents
+     documents = re.findall(doc_pattern, vert_content)
+     processed_documents = {}
+ 
+     for doc in tqdm(documents):
+         # Extract metadata
+         metadata_match = re.search(metadata_pattern, doc)
+         if metadata_match:
+             doc_id = metadata_match.group(1)
+             speaker = metadata_match.group(2)
+             year = metadata_match.group(3)
+             date = metadata_match.group(4)
+             title = metadata_match.group(5)
+             office = metadata_match.group(6)
+             occasion = metadata_match.group(7)
+             medium = metadata_match.group(8)
+             source = metadata_match.group(9)
+             link = metadata_match.group(10)
+             altsource = metadata_match.group(11)
+             status = metadata_match.group(12)
+             notes = metadata_match.group(13)
+ 
+             metadata_str = (f"Řečník: {speaker}, "
+                             f"Rok: {year}, "
+                             f"Datum: {date}, "
+                             f"Název: {title}, "
+                             f"Úřad: {office}, "
+                             f"Příležitost: {occasion}, ")
+ 
+         else:
+             raise ValueError("Metadata not found in document")
+ 
+         for bid, block in enumerate(re.findall(block_pattern, doc)):
+             # remove tags from each line, and join text
+             tokens = [line.split("\t")[0].strip() for line in block.split("\n") if line.strip() != ""]
+             doc_text = " ".join(tokens)
+ 
+             # remove any text with <...> tag
+             doc_text = re.sub(r'<[^>]*>', '', doc_text)
+ 
+             # replace more than one space with one space
+             doc_text = re.sub(r'\s+', ' ', doc_text).strip()
+ 
+             # remove whitespace before ., !, ?
+             doc_text = re.sub(ws_before_punct, r'\1', doc_text)
+ 
+             # - sometimes lines in oral are empty? e.g. 08A009N // REMOVE THESE LINES
+             if doc_text.strip() == "":
+                 continue
+ 
+             processed_documents[f"{doc_id}_{bid}"] = metadata_str + "\n" + doc_text
+ 
+     return processed_documents
+ 
+ 
+ # Read the content from the file
+ with open(TARGET, "r") as f:
+     vert_content = f.read()
+ 
+ # Process the content
+ processed_documents = process_vert_format(vert_content)
+ 
+ # write all blocks into a single JSONL file in .data/hf_dataset/cnc_prez_prejavy/test.jsonl
+ OF = ".data/hf_dataset/cnc_prez_prejavy/test.jsonl"
+ os.makedirs(os.path.dirname(OF), exist_ok=True)
+ with jsonlines.open(OF, "w") as writer:
+     for doc_id, doc in list(processed_documents.items()):
+         writer.write({"text": doc, "id": doc_id})
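
For illustration only, the sketch below shows the shape of input the regexes in convert_speeches.py expect and the record the script would then write to test.jsonl. The document id, speaker, and text are invented placeholders and are not taken from the corpus; the structure follows the code above.

import json

# A hypothetical .vert document in the shape process_vert_format expects:
# a <doc ...> element carrying all thirteen attributes in the listed order,
# containing <block> elements with one token per line (tab-separated
# annotations are dropped; only the first column is kept).
sample_vert = """<doc id="DOC1" speaker="Jan Novák" year="2020" date="1. 1. 2020" title="Novoroční projev" office="prezident" occasion="Nový rok" medium="TV" source="archiv" link="" altsource="" status="ok" notes="">
<block>
Vážení\tADJ
spoluobčané\tNOUN
,\tPUNCT
dobrý\tADJ
večer\tNOUN
.\tPUNCT
</block>
</doc>"""

# For this input, process_vert_format would return one entry keyed "DOC1_0",
# and the line written to test.jsonl would look like this:
expected_record = {
    "text": ("Řečník: Jan Novák, Rok: 2020, Datum: 1. 1. 2020, "
             "Název: Novoroční projev, Úřad: prezident, Příležitost: Nový rok, \n"
             "Vážení spoluobčané, dobrý večer."),
    "id": "DOC1_0",
}
print(json.dumps(expected_record, ensure_ascii=False))

Each id concatenates the source document id with the block index, so a single speech can yield several records, one per block.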
test.jsonl ADDED
The diff for this file is too large to render. See raw diff
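
Since the card lists the Datasets library, the uploaded split can be read back with the generic JSON loader, for example as in the sketch below; the local file path is an assumption, not part of this commit.

from datasets import load_dataset

# Load the uploaded JSON Lines split from a local copy of test.jsonl
# (path is assumed; adjust to wherever the file was downloaded).
ds = load_dataset("json", data_files={"test": "test.jsonl"})["test"]
print(ds[0]["id"])
print(ds[0]["text"][:80])

# The same file can also be read with pandas:
# import pandas as pd; df = pd.read_json("test.jsonl", lines=True)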