init

experiments/model_predict_ner.py (+59 -22)
@@ -2,7 +2,7 @@ import re
 import os
 import torch
 import json
-from typing import List
+
 from transformers import AutoTokenizer, AutoModelForTokenClassification, AutoConfig
 from datasets import load_dataset
 
@@ -33,13 +33,30 @@ def preprocess(model_name, text):
 
 class NER:
 
-    def __init__(self, model_name):
+    id_to_label = {
+        '0': 'B-corporation',
+        '1': 'B-creative_work',
+        '2': 'B-event',
+        '3': 'B-group',
+        '4': 'B-location',
+        '5': 'B-person',
+        '6': 'B-product',
+        '7': 'I-corporation',
+        '8': 'I-creative_work',
+        '9': 'I-event',
+        '10': 'I-group',
+        '11': 'I-location',
+        '12': 'I-person',
+        '13': 'I-product',
+        '14': 'O'
+    }
+
+    def __init__(self, model_name: str):
         self.model_name = model_name
         self.config = AutoConfig.from_pretrained(self.model_name)
         self.model = AutoModelForTokenClassification.from_pretrained(self.model_name, config=self.config)
         self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-        self.max_length =
-        self.id_to_label = id_to_label
+        self.max_length = 128
         # GPU setup (https://github.com/cardiffnlp/tweetnlp/issues/15)
         if torch.cuda.is_available() and torch.cuda.device_count() > 0:
             self.device = torch.device('cuda')
@@ -53,39 +70,59 @@ class NER:
         self.model.to(self.device)
         self.model.eval()
         self.dataset = load_dataset("tweettemposhift/tweet_temporal_shift", "ner_temporal")
+        self.tokenized_datasets = self.dataset.map(lambda x: self.tokenize_and_align_labels(x), batched=True)
 
     def get_prediction(self, export_dir: str, batch_size: int):
         os.makedirs(export_dir, exist_ok=True)
         for test_split in ["test_1", "test_2", "test_3", "test_4"]:
+            data = self.tokenized_datasets[test_split]
+
+            # TODO: check the data format and fix the predict function accordingly
             data = self.dataset[test_split]
             predictions = self.predict(data["text"], batch_size)
             with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
                 f.write("\n".join([json.dumps(i) for i in predictions]))
 
-
-
-                for i in predictions:
-                    f.write(json.dumps(i) + "\n")
-
-    def predict(self, text: List[str], batch_size: int):
-        text = [[preprocess(self.model_name, t) for t in i] for i in text]
-        indices = list(range(0, len(text), batch_size)) + [len(text) + 1]
-        inputs = []
+    def predict(self, example, batch_size: int):
+        indices = list(range(0, len(example), batch_size)) + [len(example) + 1]
         preds = []
+        labels = []
         with torch.no_grad():
             for i in range(len(indices) - 1):
-                encoded_input = self.tokenizer(
-                    text[indices[i]: indices[i + 1]],
-                    return_tensors='pt',
-                    padding=True,
-                    max_length=self.max_length,
-                    truncation=True)
-                inputs += encoded_input['input_ids'].cpu().detach().int().tolist()
+                encoded_input = example[indices[i]: indices[i + 1]]
+                labels += [
+                    [self.id_to_label[y] if y in self.id_to_label else y for y in x]
+                    for x in encoded_input['labels']
+                ]
                 output = self.model(**{k: v.to(self.device) for k, v in encoded_input.items()})
                 prob = torch.softmax(output['logits'], dim=-1).cpu().detach().float().tolist()
                 pred = torch.max(prob, dim=-1)[1].cpu().detach().int().tolist()
                 preds += [[self.id_to_label[_p] for _p in p] for p in pred]
-        return [{"input": i, "prediction": p} for i, p in zip(inputs, preds)]
+        return [{"prediction": p, "label": i} for p, i in zip(preds, labels)]
+
+    def tokenize_and_align_labels(self, examples):
+        tokens = [[preprocess(self.model_name, w) for w in t] for t in examples["text_tokenized"]]
+        tokenized_inputs = self.tokenizer(
+            tokens,
+            truncation=True,
+            is_split_into_words=True,
+            padding="max_length",
+            max_length=128
+        )
+        all_labels = examples["gold_label_sequence"]
+        new_labels = []
+        for token, label in zip(tokens, all_labels):
+            tmp_labels = [-100]
+            for to, la in zip(token, label):
+                to_tokenized = self.tokenizer.tokenize(to)
+                tmp_labels += [la] * len(to_tokenized)
+            if len(tmp_labels) > 128:
+                tmp_labels = tmp_labels[:128]
+            else:
+                tmp_labels = tmp_labels + [-100] * (128 - len(tmp_labels))
+            new_labels.append(tmp_labels)
+        tokenized_inputs["labels"] = new_labels
+        return tokenized_inputs
 
 
 if __name__ == '__main__':
@@ -107,4 +144,4 @@ if __name__ == '__main__':
     for random_r in range(4):
         for seed_s in range(3):
             alias = f"tweettemposhift/ner-ner_random{random_r}_seed{seed_s}-{model_m}"
-
+            NER(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
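A few notes on the new code. The class-level id_to_label table is keyed by strings ('0' through '14'), so integer ids must be converted with str() before lookup: as written, self.id_to_label[_p] in predict receives an integer and would raise KeyError, and the `y in self.id_to_label` guard is always false for integer label ids. If the checkpoints were saved with a populated id2label, the table could also be read from the config instead of hard-coded; a minimal sketch under that assumption (label_map_from_config is an illustrative name):

from transformers import AutoConfig

def label_map_from_config(model_name: str) -> dict:
    # Sketch: derive the id-to-label table from the checkpoint config rather
    # than hard-coding it. Keys are normalized to strings to match the class
    # attribute above. Assumes the checkpoint ships a populated id2label.
    config = AutoConfig.from_pretrained(model_name)
    return {str(i): label for i, label in config.id2label.items()}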
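In get_prediction, `data = self.tokenized_datasets[test_split]` is immediately shadowed by the raw split, so predict still receives plain text while its body indexes encoded_input['labels']; the TODO in the diff flags exactly this mismatch. One way to reconcile the two is to hand predict the tokenized split with torch-formatted columns; a sketch assuming the column names produced by tokenize_and_align_labels (as_torch_split is an illustrative name):

from datasets import Dataset

def as_torch_split(split: Dataset) -> Dataset:
    # Sketch: expose the tokenized columns as torch tensors so that slices of
    # the split can be moved to a device directly. set_format is the standard
    # datasets API for this.
    split.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
    return split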
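Unless the split carries a torch format (as in the previous sketch), slicing it with `example[indices[i]: indices[i + 1]]` yields a dict of plain Python lists, so `v.to(self.device)` would fail, and `torch.max(prob, dim=-1)` is applied to prob after .tolist() has already turned it into a list. The trailing `[len(example) + 1]` sentinel also only works because Python slicing clamps out-of-range ends; `[len(example)]` is the idiomatic bound. A collated version of the loop, sketched under the same column-name assumptions (predict_batches is an illustrative name):

import torch

def predict_batches(model, device, split, id_to_label, batch_size: int):
    # Sketch: batch over a tokenized split, collating each slice (a dict of
    # Python lists) into tensors before moving it to the device. id_to_label
    # is assumed to be string-keyed, as in the class attribute above.
    preds, labels = [], []
    with torch.no_grad():
        for start in range(0, len(split), batch_size):
            batch = split[start:start + batch_size]
            encoded = {k: torch.tensor(batch[k]).to(device)
                       for k in ("input_ids", "attention_mask")}
            logits = model(**encoded).logits
            # argmax on the tensor, before any conversion to Python lists
            pred = logits.argmax(dim=-1).cpu().tolist()
            preds += [[id_to_label[str(p)] for p in seq] for seq in pred]
            labels += batch["labels"]
    return [{"prediction": p, "label": l} for p, l in zip(preds, labels)]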
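tokenize_and_align_labels counts subwords by re-tokenizing every word with self.tokenizer.tokenize and prepends a single -100 for the leading special token; the hard-coded 128 duplicates self.max_length. With a fast tokenizer the same alignment is usually driven by word_ids(), which tracks subwords, special tokens, and padding in one pass; a per-example sketch assuming a fast tokenizer (align_labels_with_word_ids is an illustrative name):

def align_labels_with_word_ids(tokenizer, words, word_labels, max_length=128):
    # Sketch: label every subword with its source word's label and mask
    # special tokens and padding with -100, using the fast tokenizers'
    # word_ids() instead of re-tokenizing each word.
    enc = tokenizer(words, is_split_into_words=True, truncation=True,
                    padding="max_length", max_length=max_length)
    enc["labels"] = [-100 if wid is None else word_labels[wid]
                     for wid in enc.word_ids()]
    return enc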
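For reference, get_prediction writes one JSON object per line to {export_dir}/{test_split}.jsonl; a minimal reader for the exported files (read_predictions is an illustrative name):

import json

def read_predictions(path: str):
    # Sketch: read back one of the exported prediction files, one
    # {"prediction": ..., "label": ...} record per line.
    with open(path) as f:
        return [json.loads(line) for line in f if line.strip()]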