import re
import os
import json

import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification, AutoConfig
from datasets import load_dataset
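
# Placeholder substitutions applied to raw tweets before tokenization: user
# handles and URLs are replaced with the placeholder tokens each checkpoint expects.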
URL_RE = re.compile(r"https?://[\w./?=&#%_:-]+")
HANDLE_RE = re.compile(r"@\w+")


def preprocess_bernice(text):
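    """Normalise a tweet for Bernice: handles become @USER, URLs become HTTPURL."""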
    text = HANDLE_RE.sub("@USER", text)
    text = URL_RE.sub("HTTPURL", text)
    return text


def preprocess_timelm(text):
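    """Normalise a tweet for TimeLM checkpoints: handles become @user, URLs become http."""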
    text = HANDLE_RE.sub("@user", text)
    text = URL_RE.sub("http", text)
    return text


def preprocess(model_name, text):
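    """Apply the normalisation scheme matching the given checkpoint family."""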
    # Substring checks so that fine-tuned aliases (e.g. "tweettemposhift/ner-...-bernice")
    # are matched as well as the base checkpoints, and both base and large
    # twitter-roberta variants receive TimeLM preprocessing.
    if "bernice" in model_name:
        return preprocess_bernice(text)
    if "twitter-roberta" in model_name:
        return preprocess_timelm(text)
    return text


class NER:
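    """Token-classification inference over the tweet_temporal_shift NER splits."""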

    id_to_label = {
        0: 'B-corporation',
        1: 'B-creative_work',
        2: 'B-event',
        3: 'B-group',
        4: 'B-location',
        5: 'B-person',
        6: 'B-product',
        7: 'I-corporation',
        8: 'I-creative_work',
        9: 'I-event',
        10: 'I-group',
        11: 'I-location',
        12: 'I-person',
        13: 'I-product',
        14: 'O'
    }

    def __init__(self, model_name: str):
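        """Load model, tokenizer, and dataset, and move the model to the best available device."""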
        self.model_name = model_name
        self.config = AutoConfig.from_pretrained(self.model_name)
        self.model = AutoModelForTokenClassification.from_pretrained(self.model_name, config=self.config)
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.max_length = 128

        # Prefer CUDA, then Apple MPS, then fall back to CPU.
        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
            self.device = torch.device("cuda")
        elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available() and torch.backends.mps.is_built():
            self.device = torch.device("mps")
        else:
            self.device = torch.device("cpu")
        # Wrap in DataParallel when more than one GPU is visible.
        self.parallel = torch.cuda.device_count() > 1
        if self.parallel:
            self.model = torch.nn.DataParallel(self.model)
        self.model.to(self.device)
        self.model.eval()
        self.dataset = load_dataset("tweettemposhift/tweet_temporal_shift", "ner_temporal")
        self.tokenized_datasets = self.dataset.map(lambda x: self.tokenize_and_align_labels(x), batched=True)

    def get_prediction(self, export_dir: str, batch_size: int):
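        """Run prediction on each test split and write one JSONL file per split."""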
        os.makedirs(export_dir, exist_ok=True)
        for test_split in ["test_1", "test_2", "test_3", "test_4"]:
            predictions = self.predict(self.tokenized_datasets[test_split], batch_size)
            with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
                f.write("\n".join([json.dumps(i) for i in predictions]))

    def predict(self, example, batch_size: int):
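        """Batched inference; returns per-token predicted and gold tag sequences."""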
        input_keys = ['input_ids', 'attention_mask']
        # Batch boundaries; the final index marks the end of the last (possibly partial) batch.
        indices = list(range(0, len(example), batch_size)) + [len(example)]
        preds = []
        labels = []
        with torch.no_grad():
            for i in range(len(indices) - 1):
                encoded_input = example[indices[i]: indices[i + 1]]
                # Convert gold label ids to tag strings; masked positions (-100) stay numeric.
                labels += [
                    [self.id_to_label[y] if y in self.id_to_label else y for y in x]
                    for x in encoded_input['labels']
                ]
                output = self.model(**{
                    k: torch.tensor(encoded_input[k]).to(self.device) for k in input_keys if k in encoded_input
                })
                prob = torch.softmax(output['logits'], dim=-1)
                pred = torch.max(prob, dim=-1)[1].cpu().detach().int().tolist()
                preds += [[self.id_to_label[_p] for _p in p] for p in pred]
        return [{"prediction": p, "label": i} for p, i in zip(preds, labels)]

    def tokenize_and_align_labels(self, examples):
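        """Tokenize pre-split words and align gold labels with sub-word pieces."""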
        tokens = [[preprocess(self.model_name, w) for w in t] for t in examples["text_tokenized"]]
        tokenized_inputs = self.tokenizer(
            tokens,
            truncation=True,
            is_split_into_words=True,
            padding="max_length",
            max_length=self.max_length
        )
        all_labels = examples["gold_label_sequence"]
        new_labels = []
        for token, label in zip(tokens, all_labels):
            # The leading -100 masks the BOS special token; each word's label is
            # repeated once per sub-word piece it is split into.
            tmp_labels = [-100]
            for to, la in zip(token, label):
                to_tokenized = self.tokenizer.tokenize(to)
                tmp_labels += [la] * len(to_tokenized)
            # Truncate or pad the label sequence to the fixed maximum length.
            if len(tmp_labels) > self.max_length:
                tmp_labels = tmp_labels[:self.max_length]
            else:
                tmp_labels = tmp_labels + [-100] * (self.max_length - len(tmp_labels))
            new_labels.append(tmp_labels)
        tokenized_inputs["labels"] = new_labels
        return tokenized_inputs


if __name__ == '__main__':
    model_list = [
        "roberta-base",
        "bertweet-base",
        "bernice",
        "roberta-large",
        "bertweet-large",
        "twitter-roberta-base-2019-90m",
        "twitter-roberta-base-dec2020",
        "twitter-roberta-base-2021-124m",
        "twitter-roberta-base-2022-154m",
        "twitter-roberta-large-2022-154m"
    ]
    for model_m in model_list:
        alias = f"tweettemposhift/ner-ner_temporal-{model_m}"
        NER(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
        for random_r in range(4):
            for seed_s in range(3):
                alias = f"tweettemposhift/ner-ner_random{random_r}_seed{seed_s}-{model_m}"
                NER(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)