updated format and data
- annotations_parser.py +10 -10
- data.jsonl +0 -0
annotations_parser.py
CHANGED
@@ -9,8 +9,8 @@ def parse_annotated_text(text):
     # This variable will keep track of the number of characters removed
     chars_removed = 0
 
-    # This will store the
-
+    # This will store the spans of the entities in the original text
+    spans_in_original_text = []
 
     # Buffer to store content without annotations
     buffer = []
@@ -40,12 +40,12 @@ def parse_annotated_text(text):
         buffer.append(text[last_end:start])
         buffer.append(entity)
 
-        # Calculate the start and end
+        # Calculate the start and end spans in the original text
         original_start = start - chars_removed
         original_end = original_start + len(entity)
 
-        # Store the
-
+        # Store the spans
+        spans_in_original_text.append((original_start, original_end))
 
         # update the chars_removed counter
         chars_removed += len(full_match) - len(entity)
@@ -61,7 +61,7 @@ def parse_annotated_text(text):
 
     return {
         'text': content_without_annotations,
-        '
+        'spans': spans_in_original_text,
         'labels': labels
     }
 
@@ -99,7 +99,7 @@ def load_yedda_annotations(directory):
             'file': filename,
             'annotated_text': content,
             'text': parsed['text'],
-            '
+            'spans': parsed['spans'],
             'labels': parsed['labels'],
         }
         all_annotations.append(file_annotations)
@@ -111,7 +111,7 @@ def convert_to_ann(annotatations):
     text = annotatations['text']
     buffer = []
     i = 0
-    for (j_start, j_end), label in zip(annotatations['
+    for (j_start, j_end), label in zip(annotatations['spans'], annotatations['labels']):
 
         buffer += text[i:j_start]
         buffer += [f'[@{text[j_start:j_end]}#{label}*]']
@@ -133,8 +133,8 @@ if __name__ == '__main__':
         print('File:', file_annotation['file'])
         print('Text[:100]:', repr(file_annotation['text'][:100]))
         print('Number of labels:', len(file_annotation['labels']))
-        assert len(file_annotation['labels']) == len(file_annotation['
-        print('Average labeled sentence length:', sum(end-start for start,end in file_annotation['
+        assert len(file_annotation['labels']) == len(file_annotation['spans'])
+        print('Average labeled sentence length:', sum(end-start for start,end in file_annotation['spans']) / len(file_annotation['spans']))
         print('--------------------------------')
 
     print('Total number of files:', len(annotations))
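For reference, the sketch below illustrates the span-tracking idea this change introduces: strip the YEDDA-style [@entity#label*] markup (the same format convert_to_ann emits) while using a running chars_removed counter to map each entity's position in the annotated text to its (start, end) offsets in the clean text, returned under the new 'spans' key alongside 'text' and 'labels'. It is a minimal, self-contained illustration; the regex, the parse_annotated_text_sketch name, and the sample sentence are assumptions for demonstration, not code taken from the file.

import re

# Illustrative regex for [@entity#label*] annotations; the actual pattern in
# annotations_parser.py is not shown in this diff.
ANNOTATION_RE = re.compile(r'\[@(.+?)#(.+?)\*\]')

def parse_annotated_text_sketch(text):
    buffer = []          # pieces of the text with the markup stripped
    spans = []           # (start, end) offsets of each entity in the clean text
    labels = []          # label of each entity, aligned with spans
    chars_removed = 0    # markup characters dropped so far
    last_end = 0

    for match in ANNOTATION_RE.finditer(text):
        entity, label = match.group(1), match.group(2)
        start, end = match.span()

        # Keep the plain text before the annotation, then the entity itself.
        buffer.append(text[last_end:start])
        buffer.append(entity)

        # Every markup character removed so far shifts offsets to the left,
        # so subtracting chars_removed maps positions into the clean text.
        clean_start = start - chars_removed
        spans.append((clean_start, clean_start + len(entity)))
        labels.append(label)

        chars_removed += len(match.group(0)) - len(entity)
        last_end = end

    buffer.append(text[last_end:])
    return {'text': ''.join(buffer), 'spans': spans, 'labels': labels}

if __name__ == '__main__':
    sample = 'Alice met [@Bob#PERSON*] in [@Paris#LOCATION*].'
    parsed = parse_annotated_text_sketch(sample)
    print(parsed['text'])                    # Alice met Bob in Paris.
    for (s, e), label in zip(parsed['spans'], parsed['labels']):
        print(parsed['text'][s:e], label)    # Bob PERSON / Paris LOCATION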
data.jsonl
CHANGED
The diff for this file is too large to render.
See raw diff