TARGET = ".data/speeches.vert.shuffled" import os import re from typing import Dict import jsonlines from tqdm import tqdm def process_vert_format(vert_content: str) -> Dict[str, str]: doc_pattern = re.compile(r']*>.*?', re.DOTALL) # Pattern to match document boundaries and extract metadata metadata_pattern = re.compile( r'' ) block_pattern = re.compile(r']*>.*?', re.DOTALL) # Pattern to remove whitespace before punctuation ws_before_punct = re.compile(r'\s+([.,!?:;])') # Find all documents documents = re.findall(doc_pattern, vert_content) processed_documents = {} for doc in tqdm(documents): # Extract metadata metadata_match = re.search(metadata_pattern, doc) if metadata_match: doc_id = metadata_match.group(1) speaker = metadata_match.group(2) year = metadata_match.group(3) date = metadata_match.group(4) title = metadata_match.group(5) office = metadata_match.group(6) occasion = metadata_match.group(7) medium = metadata_match.group(8) source = metadata_match.group(9) link = metadata_match.group(10) altsource = metadata_match.group(11) status = metadata_match.group(12) notes = metadata_match.group(13) metadata_str = (f"Řečník: {speaker}, " f"Rok: {year}, " f"Datum: {date}, " f"Název: {title}, " f"Úřad: {office}, " f"Příležitost: {occasion}, ") else: raise ValueError("Metadata not found in document") for bid, block in enumerate(re.findall(block_pattern, doc)): # remove tags from each line, and join text tokens = [line.split("\t")[0].strip() for line in block.split("\n") if line.strip() != ""] doc_text = " ".join(tokens) # remove any text with <...> tag doc_text = re.sub(r'<[^>]*>', '', doc_text) # replace more than one space with one space doc_text = re.sub(r'\s+', ' ', doc_text).strip() # remove whitespace before ., !, ? doc_text = re.sub(ws_before_punct, r'\1', doc_text) # - sometimes lines in oral are empty? e.g. 08A009N // REMOVE THESE LINES if doc_text.strip() == "": continue processed_documents[f"{doc_id}_{bid}"] = metadata_str + "\n" + doc_text return processed_documents # Read the content from the file with open(TARGET, "r") as f: vert_content = f.read() # Process the content processed_documents = process_vert_format(vert_content) # write all splits into same json file in .data/hf_dataset/cnc_fictree/test.jsonl OF = ".data/hf_dataset/cnc_prez_prejavy/test.jsonl" os.makedirs(os.path.dirname(OF), exist_ok=True) with jsonlines.open(OF, "w") as writer: for doc_id, doc in list(processed_documents.items()): writer.write({"text": doc, "id": doc_id})