asahi417 committed
Commit d7977c0 · Parent: 253c4c9
experiments/huggingface_ops.py CHANGED
@@ -2,8 +2,8 @@ from huggingface_hub import HfApi, ModelFilter
 from pprint import pprint
 
 api = HfApi()
-models = api.list_models(filter=ModelFilter(author='vocabtrimmer'))
-models_filtered = [i.modelId for i in models if 'cardiffnlp/twitter-roberta-base-jun2020' in i.modelId]
+models = api.list_models(filter=ModelFilter(author='tweettemposhift'))
+models_filtered = [i.modelId for i in models if 'twitter-roberta-base-sep2021' in i.modelId]
 pprint(sorted(models_filtered))
 # models = api.list_models(filter=ModelFilter(author='tweettemposhift'))
 # models_filtered = [i.modelId for i in models if 'topic-' in i.modelId]
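Note: newer huggingface_hub releases deprecate ModelFilter in favor of keyword arguments on list_models, so this script may eventually need updating; a minimal sketch of the equivalent call, assuming a recent huggingface_hub:

    from huggingface_hub import HfApi
    from pprint import pprint

    api = HfApi()
    # the author= keyword replaces ModelFilter(author=...)
    models = api.list_models(author="tweettemposhift")
    models_filtered = [m.modelId for m in models if "twitter-roberta-base-sep2021" in m.modelId]
    pprint(sorted(models_filtered))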
experiments/main.sh CHANGED
@@ -71,6 +71,8 @@ python model_finetuning_topic.py -m "${MODEL}" -d "topic_random1_seed2"
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed2"
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed2"
 
+# Rerun eval for NER
+
 download () {
   git clone "https://huggingface.co/tweettemposhift/ner-${1}-${2##*/}"
   mkdir "ckpt/ner-${1}-${2##*/}"
experiments/model_predict_ner.py CHANGED
@@ -1,110 +1,110 @@
-# #WIP
-# import re
-# import os
-# import torch
-# import json
-# from typing import Dict, List
-# from transformers import AutoTokenizer, AutoModelForTokenClassification, AutoConfig
-# from datasets import load_dataset
-#
-# URL_RE = re.compile(r"https?:\/\/[\w\.\/\?\=\d&#%_:/-]+")
-# HANDLE_RE = re.compile(r"@\w+")
-#
-#
-# def preprocess_bernice(text):
-#     text = HANDLE_RE.sub("@USER", text)
-#     text = URL_RE.sub("HTTPURL", text)
-#     return text
-#
-#
-# def preprocess_timelm(text):
-#     text = HANDLE_RE.sub("@user", text)
-#     text = URL_RE.sub("http", text)
-#     return text
-#
-#
-# def preprocess(model_name, text):
-#     if model_name == "jhu-clsp/bernice":
-#         return preprocess_bernice(text)
-#     if "twitter-roberta-base" in model_name:
-#         return preprocess_timelm(text)
-#     return text
-#
-#
-# class NER:
-#
-#     def __init__(self, model_name: str, max_length: int, id_to_label: Dict[str, str]):
-#         self.model_name = model_name
-#         self.config = AutoConfig.from_pretrained(self.model_name)
-#         self.model = AutoModelForTokenClassification.from_pretrained(self.model_name, config=self.config)
-#         self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-#         self.max_length = max_length
-#         self.id_to_label = id_to_label
-#         # GPU setup (https://github.com/cardiffnlp/tweetnlp/issues/15)
-#         if torch.cuda.is_available() and torch.cuda.device_count() > 0:
-#             self.device = torch.device('cuda')
-#         elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available() and torch.backends.mps.is_built():
-#             self.device = torch.device("mps")
-#         else:
-#             self.device = torch.device('cpu')
-#         self.parallel = torch.cuda.device_count() > 1
-#         if self.parallel:
-#             self.model = torch.nn.DataParallel(self.model)
-#         self.model.to(self.device)
-#         self.model.eval()
-#         self.dataset = load_dataset("tweettemposhift/tweet_temporal_shift", "ner_temporal")
-#
-#     def get_prediction(self, export_dir: str, batch_size: int):
-#         os.makedirs(export_dir, exist_ok=True)
-#         for test_split in ["test_1", "test_2", "test_3", "test_4"]:
-#             data = self.dataset[test_split]
-#             predictions = self.predict(data["text"], batch_size)
-#             with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
-#                 f.write("\n".join([json.dumps(i) for i in predictions]))
-#
-#         with open(export_dir, "w") as f:
-#             predictions = self.predict(self.dataset[], batch_size)
-#             for i in :
-#                 f.write(json.dumps(i) + "\n")
-#
-#     def predict(self, text: List[str], batch_size: int):
-#         text = [[preprocess(self.model_name, t) for t in i] for i in text]
-#         indices = list(range(0, len(text), batch_size)) + [len(text) + 1]
-#         inputs = []
-#         preds = []
-#         with torch.no_grad():
-#             for i in range(len(indices) - 1):
-#                 encoded_input = self.tokenizer.batch_encode_plus(
-#                     text[indices[i]: indices[i + 1]],
-#                     max_length=self.max_length,
-#                     return_tensors='pt',
-#                     padding=True,
-#                     truncation=True)
-#                 inputs += encoded_input['input_ids'].cpu().detach().int().tolist()
-#                 output = self.model(**{k: v.to(self.device) for k, v in encoded_input.items()})
-#                 prob = torch.softmax(output['logits'], dim=-1).cpu().detach().float().tolist()
-#                 pred = torch.max(prob, dim=-1)[1].cpu().detach().int().tolist()
-#                 preds += [[self.id_to_label[_p] for _p in p] for p in pred]
-#         return [{"label": p, "input_id": i} for p, i in zip(preds, inputs)]
-#
-#
-# if __name__ == '__main__':
-#     model_list = [
-#         "roberta-base",
-#         "bertweet-base",
-#         "bernice",
-#         "roberta-large",
-#         "bertweet-large",
-#         "twitter-roberta-base-2019-90m",
-#         "twitter-roberta-base-dec2020",
-#         "twitter-roberta-base-2021-124m",
-#         "twitter-roberta-base-2022-154m",
-#         "twitter-roberta-large-2022-154m"
-#     ]
-#     for model_m in model_list:
-#         alias = f"tweettemposhift/ner-ner_temporal-{model_m}"
-#         NER(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
-#         for random_r in range(4):
-#             for seed_s in range(3):
-#                 alias = f"tweettemposhift/ner-ner_random{random_r}_seed{seed_s}-{model_m}"
-#                 TopicClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
+import re
+import os
+import torch
+import json
+from typing import Dict, List
+from transformers import AutoTokenizer, AutoModelForTokenClassification, AutoConfig
+from datasets import load_dataset
+
+
+URL_RE = re.compile(r"https?:\/\/[\w\.\/\?\=\d&#%_:/-]+")
+HANDLE_RE = re.compile(r"@\w+")
+
+
+def preprocess_bernice(text):
+    # Bernice expects upper-case placeholders for handles and URLs.
+    text = HANDLE_RE.sub("@USER", text)
+    text = URL_RE.sub("HTTPURL", text)
+    return text
+
+
+def preprocess_timelm(text):
+    # TimeLM checkpoints expect lower-case placeholders.
+    text = HANDLE_RE.sub("@user", text)
+    text = URL_RE.sub("http", text)
+    return text
+
+
+def preprocess(model_name, text):
+    if model_name == "jhu-clsp/bernice":
+        return preprocess_bernice(text)
+    if "twitter-roberta-base" in model_name:
+        return preprocess_timelm(text)
+    return text
+
+
+class NER:
+
+    def __init__(self, model_name: str, max_length: int = 128, id_to_label: Dict[int, str] = None):
+        # max_length default is an assumption; the callers below do not pass one.
+        self.model_name = model_name
+        self.config = AutoConfig.from_pretrained(self.model_name)
+        self.model = AutoModelForTokenClassification.from_pretrained(self.model_name, config=self.config)
+        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+        self.max_length = max_length
+        # Fall back to the label mapping stored in the model config.
+        self.id_to_label = id_to_label if id_to_label is not None else {int(k): v for k, v in self.config.id2label.items()}
+        # GPU setup (https://github.com/cardiffnlp/tweetnlp/issues/15)
+        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
+            self.device = torch.device('cuda')
+        elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available() and torch.backends.mps.is_built():
+            self.device = torch.device("mps")
+        else:
+            self.device = torch.device('cpu')
+        self.parallel = torch.cuda.device_count() > 1
+        if self.parallel:
+            self.model = torch.nn.DataParallel(self.model)
+        self.model.to(self.device)
+        self.model.eval()
+        self.dataset = load_dataset("tweettemposhift/tweet_temporal_shift", "ner_temporal")
+
+    def get_prediction(self, export_dir: str, batch_size: int):
+        # Export one JSONL file of predictions per test split.
+        os.makedirs(export_dir, exist_ok=True)
+        for test_split in ["test_1", "test_2", "test_3", "test_4"]:
+            data = self.dataset[test_split]
+            predictions = self.predict(data["text"], batch_size)
+            with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
+                f.write("\n".join([json.dumps(i) for i in predictions]))
+
+    def predict(self, text: List[List[str]], batch_size: int):
+        # Each example is a list of word tokens; preprocess token by token.
+        text = [[preprocess(self.model_name, t) for t in i] for i in text]
+        indices = list(range(0, len(text), batch_size)) + [len(text)]
+        inputs = []
+        preds = []
+        with torch.no_grad():
+            for i in range(len(indices) - 1):
+                encoded_input = self.tokenizer.batch_encode_plus(
+                    text[indices[i]: indices[i + 1]],
+                    max_length=self.max_length,
+                    return_tensors='pt',
+                    padding=True,
+                    truncation=True,
+                    is_split_into_words=True)  # inputs are pre-tokenized word lists
+                inputs += encoded_input['input_ids'].cpu().detach().int().tolist()
+                output = self.model(**{k: v.to(self.device) for k, v in encoded_input.items()})
+                # Take the argmax over label logits before converting to Python lists.
+                prob = torch.softmax(output['logits'], dim=-1)
+                pred = torch.max(prob, dim=-1)[1].cpu().detach().int().tolist()
+                preds += [[self.id_to_label[_p] for _p in p] for p in pred]
+        return [{"label": p, "input_id": i} for p, i in zip(preds, inputs)]
+
+
+if __name__ == '__main__':
+    model_list = [
+        "roberta-base",
+        "bertweet-base",
+        "bernice",
+        "roberta-large",
+        "bertweet-large",
+        "twitter-roberta-base-2019-90m",
+        "twitter-roberta-base-dec2020",
+        "twitter-roberta-base-2021-124m",
+        "twitter-roberta-base-2022-154m",
+        "twitter-roberta-large-2022-154m"
+    ]
+    for model_m in model_list:
+        alias = f"tweettemposhift/ner-ner_temporal-{model_m}"
+        NER(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
+        for random_r in range(4):
+            for seed_s in range(3):
+                alias = f"tweettemposhift/ner-ner_random{random_r}_seed{seed_s}-{model_m}"
+                NER(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
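For reference, a minimal sketch of how the exported JSONL could be read back; the checkpoint name and file path are assumptions based on the aliases above:

    import json
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("tweettemposhift/ner-ner_temporal-roberta-base")
    with open("prediction_files/ner-ner_temporal-roberta-base/test_1.jsonl") as f:
        for line in f:
            record = json.loads(line)  # {"label": [...], "input_id": [...]} per example
            tokens = tokenizer.convert_ids_to_tokens(record["input_id"])
            for token, tag in zip(tokens, record["label"]):
                if token not in tokenizer.all_special_tokens:
                    print(token, tag)

Note that the labels are aligned with sub-token input ids, not with the original word tokens, so word-level evaluation would still need an alignment step.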