CZLC / ner_court_decisions / convert_ner_court_decisions.py
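"""Convert the fewshot-goes-multilingual/cs_czech-court-decisions-ner dataset into
train/test JSONL files for the ner_court_decisions task.

Each output record contains an entity category ("label"), the surface forms of all
same-category entities found in the excerpt ("answers"), and a text excerpt trimmed
to roughly max_text_length whitespace tokens around the entity ("text").
"""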
import os
import random

import jsonlines
from datasets import load_dataset


def whitespace_tokenize_with_offsets(text):
    """Whitespace-tokenize `text` and return (tokens, start_offsets, end_offsets)."""
    tokens = []
    start_tok_offsets = []
    end_tok_offsets = []
    current_token = ""
    current_token_start = None
    for i, char in enumerate(text):
        if char.isspace():
            if current_token:
                tokens.append(current_token)
                start_tok_offsets.append(current_token_start)
                end_tok_offsets.append(i)
                current_token = ""
                current_token_start = None
        else:
            if current_token == "":
                current_token_start = i
            current_token += char
    # Add the last token if there is one
    if current_token:
        tokens.append(current_token)
        start_tok_offsets.append(current_token_start)
        end_tok_offsets.append(len(text))
    return tokens, start_tok_offsets, end_tok_offsets
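
# Illustrative check of the tokenizer above (offsets are character indices into the input):
#   tokens, starts, ends = whitespace_tokenize_with_offsets("Nejvyšší soud ČR")
#   tokens == ["Nejvyšší", "soud", "ČR"], starts == [0, 9, 14], ends == [8, 13, 16]
# so text[starts[i]:ends[i]] recovers token i.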


def proc_dataset(dataset, max_text_length=200):
    """Turn NER-annotated documents into (label, answers, text) records,
    trimming long texts to roughly max_text_length tokens around each entity."""
    r = []
    for doc in dataset:
        text = doc["text"]
        covered_entities = set()
        for ent_id, entity in enumerate(doc["entities"]):
            if ent_id in covered_entities:
                continue
            target_text = text
            if len(text) > max_text_length:
                tokens, start_tok_offsets, end_tok_offsets = whitespace_tokenize_with_offsets(text)
                entity_start = entity["start"]
                entity_end = entity["end"]
                # Find the token indices that correspond to the entity
                entity_start_idx = None
                entity_end_idx = None
                for idx, (start, end) in enumerate(zip(start_tok_offsets, end_tok_offsets)):
                    if start <= entity_start < end:
                        entity_start_idx = idx
                    if start < entity_end <= end:
                        entity_end_idx = idx
                        break
                if entity_start_idx is None or entity_end_idx is None:
                    continue

                # Split the remaining token budget randomly before/after the entity
                allowed_tokens = max_text_length - len(tokens[entity_start_idx:entity_end_idx + 1]) - 20
                before_tokens = random.randint(0, int(allowed_tokens * 0.8))
                after_tokens = allowed_tokens - before_tokens

                # Determine the start and end indices for the new text segment
                if entity_start_idx - before_tokens < 0:
                    after_tokens += -(entity_start_idx - before_tokens)
                elif entity_end_idx + after_tokens + 1 >= len(tokens):
                    before_tokens += entity_end_idx + after_tokens + 1 - len(tokens)
                start_idx = max(0, entity_start_idx - before_tokens)
                end_idx = min(len(tokens), entity_end_idx + after_tokens + 1)

                # Ensure the first 20 tokens are included if possible
                initial_text = ""
                if start_idx > 20:
                    initial_text = text[:end_tok_offsets[20]] + "... "

                # Use offsets to extract the original text
                start_offset = start_tok_offsets[start_idx]
                end_offset = end_tok_offsets[end_idx - 1]
                target_text = initial_text + text[start_offset:end_offset]

            # If the target text contains more entities of the same type,
            # add them to the answers and mark them as covered
            this_answer_entities = [ent_id]
            answers = [entity["content"]]
            for ent_id2, entity2 in enumerate(doc["entities"]):
                if ent_id2 == ent_id:
                    continue
                # Same category and its surface form occurs in the target text
                if entity2["category_str"] == entity["category_str"]:
                    if entity2["content"] in target_text:
                        this_answer_entities.append(ent_id2)
                        answers.append(entity2["content"])
            covered_entities.update(this_answer_entities)
            r.append({
                "label": entity["category_str"],
                "answers": list(set(answers)),
                "text": target_text,
            })
    return r
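
# Shape of one record appended by proc_dataset (field values here are placeholders,
# not real data):
#   {"label": "<category_str>", "answers": ["<entity mention>", ...], "text": "<trimmed excerpt>"}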

d = load_dataset("fewshot-goes-multilingual/cs_czech-court-decisions-ner")

train = list(d['train'])
random.shuffle(train)
new_dataset_train = proc_dataset(train[200:])
dataset_test_ftrain = proc_dataset(train[:200])
dataset_val = proc_dataset(d['validation'])
dataset_test = proc_dataset(d['test'])

# merge splits
new_dataset_test = dataset_test_ftrain + dataset_val + dataset_test
random.shuffle(new_dataset_test)

# save using jsonlines into .data/hf_dataset/ner_court_decisions
os.makedirs(".data/hf_dataset/ner_court_decisions", exist_ok=True)

# print dataset lengths
print("train", len(new_dataset_train))
print("test", len(new_dataset_test))

with jsonlines.open(".data/hf_dataset/ner_court_decisions/test.jsonl", "w") as f:
    f.write_all(new_dataset_test)
with jsonlines.open(".data/hf_dataset/ner_court_decisions/train.jsonl", "w") as f:
    f.write_all(new_dataset_train)
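
# The written files can be loaded back e.g. with the Hugging Face JSON loader:
#   load_dataset("json", data_files={
#       "train": ".data/hf_dataset/ner_court_decisions/train.jsonl",
#       "test": ".data/hf_dataset/ner_court_decisions/test.jsonl",
#   })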