Datasets: CZLC
Modalities: Text
Formats: json
Languages: Czech
Libraries: Datasets, pandas
File size: 5,135 bytes
import os
import random

import jsonlines
from datasets import load_dataset
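
# This script builds the "ner_court_decisions" task data from the Czech court
# decisions NER corpus: for each annotated entity it extracts a (possibly
# truncated) text window of the document and collects every mention of the
# same entity type that appears in that window as an accepted answer.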


def whitespace_tokenize_with_offsets(text):
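    """Split text on whitespace, returning tokens plus character start/end offsets.

    Illustrative example:
        whitespace_tokenize_with_offsets("a bb  ccc")
        -> (["a", "bb", "ccc"], [0, 2, 6], [1, 4, 9])
    """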
    tokens = []
    start_tok_offsets = []
    end_tok_offsets = []
    current_token = ""
    current_token_start = None

    for i, char in enumerate(text):
        if char.isspace():
            if current_token:
                tokens.append(current_token)
                start_tok_offsets.append(current_token_start)
                end_tok_offsets.append(i)
                current_token = ""
                current_token_start = None
        else:
            if current_token == "":
                current_token_start = i
            current_token += char

    # Add the last token if there is one
    if current_token:
        tokens.append(current_token)
        start_tok_offsets.append(current_token_start)
        end_tok_offsets.append(len(text))

    return tokens, start_tok_offsets, end_tok_offsets


def proc_dataset(dataset, max_text_length=200):
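    """Convert NER-annotated documents into {"label", "answers", "text"} records.

    One record is emitted per annotated entity. Documents longer than
    ``max_text_length`` are truncated to a token window around the entity
    (keeping some of the document opening as context), and every other mention
    of the same entity type that appears verbatim in the window is added as an
    additional accepted answer.
    """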
    r = []
    for doc in dataset:
        text = doc["text"]
        covered_entities = set()
        for ent_id, entity in enumerate(doc["entities"]):
            if ent_id in covered_entities:
                continue
            target_text = text
            if len(text) > max_text_length:
                tokens, start_tok_offsets, end_tok_offsets = whitespace_tokenize_with_offsets(text)
                entity_start = entity["start"]
                entity_end = entity["end"]

                # Find the token indices that correspond to the entity
                entity_start_idx = None
                entity_end_idx = None
                for idx, (start, end) in enumerate(zip(start_tok_offsets, end_tok_offsets)):
                    if start <= entity_start < end:
                        entity_start_idx = idx
                    if start < entity_end <= end:
                        entity_end_idx = idx
                        break

                if entity_start_idx is None or entity_end_idx is None:
                    continue

                allowed_tokens = max_text_length - len(tokens[entity_start_idx:entity_end_idx + 1]) - 20
                before_tokens = random.randint(0, int(allowed_tokens * 0.8))
                after_tokens = allowed_tokens - before_tokens

                # Determine the start and end indices for the new text segment
                if entity_start_idx - before_tokens < 0:
                    after_tokens += - (entity_start_idx - before_tokens)
                elif entity_end_idx + after_tokens + 1 >= len(tokens):
                    before_tokens += entity_end_idx + after_tokens + 1 - len(tokens)
                start_idx = max(0, entity_start_idx - before_tokens)
                end_idx = min(len(tokens), entity_end_idx + after_tokens + 1)

                # If the window starts well past the document opening, prepend roughly the first 20 tokens as context
                initial_text = ""
                if start_idx > 20:
                    initial_text = text[:end_tok_offsets[20]] + "... "

                # Use offsets to extract the original text
                start_offset = start_tok_offsets[start_idx]
                end_offset = end_tok_offsets[end_idx - 1]

                target_text = initial_text + text[start_offset:end_offset]

            # If the target text contains more entities of the same type, add them to the answers and mark them as covered
            this_answer_entities = [ent_id]
            answers = [entity["content"]]
            for ent_id2, entity2 in enumerate(doc["entities"]):
                if ent_id2 == ent_id:
                    continue
                # check type
                if entity2["category_str"] == entity["category_str"]:
                    # just check the string in the target text
                    # check if the entity is in the target text
                    if entity2["content"] in target_text:
                        this_answer_entities.append(ent_id2)
                        answers.append(entity2["content"])

            covered_entities.update(this_answer_entities)

            r.append({
                "label": entity["category_str"],
                "answers": list(set(answers)),
                "text": target_text,
            })
    return r


d = load_dataset("fewshot-goes-multilingual/cs_czech-court-decisions-ner")
train = list(d['train'])
random.shuffle(train)
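# Hold out 200 shuffled training documents; together with the original
# validation and test splits they form the new test set below.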
new_dataset_train = proc_dataset(train[200:])
dataset_test_ftrain = proc_dataset(train[:200])
dataset_val = proc_dataset(d['validation'])
dataset_test = proc_dataset(d['test'])

# merge splits
new_dataset_test = dataset_test_ftrain + dataset_val + dataset_test
random.shuffle(new_dataset_test)

# save the splits as JSON Lines under .data/hf_dataset/ner_court_decisions
os.makedirs(".data/hf_dataset/ner_court_decisions", exist_ok=True)

# print dataset lengths
print("train", len(new_dataset_train))
print("test", len(new_dataset_test))

with jsonlines.open(".data/hf_dataset/ner_court_decisions/test.jsonl", "w") as f:
    f.write_all(new_dataset_test)
with jsonlines.open(".data/hf_dataset/ner_court_decisions/train.jsonl", "w") as f:
    f.write_all(new_dataset_train)
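
# Optional sanity check (an addition, not part of the original pipeline):
# read one record back and confirm the expected keys.
with jsonlines.open(".data/hf_dataset/ner_court_decisions/train.jsonl") as f:
    first = next(iter(f))
assert set(first) == {"label", "answers", "text"}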