asahi417 committed
Commit 50f0349 · 1 Parent(s): 35c40af
experiments/analysis/nerd.csv ADDED
The diff for this file is too large to render. See raw diff
 
experiments/analysis/sentiment.csv ADDED
The diff for this file is too large to render. See raw diff
 
experiments/analysis/topic.csv ADDED
The diff for this file is too large to render. See raw diff
 
experiments/analysis_prediction.py ADDED
@@ -0,0 +1,129 @@
+import os
+import json
+
+import pandas as pd
+from datasets import load_dataset
+
+root_dir = "experiments/prediction_files"
+id_to_label = {
+    '0': 'arts_&_culture',
+    '1': 'business_&_entrepreneurs',
+    '2': 'celebrity_&_pop_culture',
+    '3': 'diaries_&_daily_life',
+    '4': 'family',
+    '5': 'fashion_&_style',
+    '6': 'film_tv_&_video',
+    '7': 'fitness_&_health',
+    '8': 'food_&_dining',
+    '9': 'gaming',
+    '10': 'learning_&_educational',
+    '11': 'music',
+    '12': 'news_&_social_concern',
+    '13': 'other_hobbies',
+    '14': 'relationships',
+    '15': 'science_&_technology',
+    '16': 'sports',
+    '17': 'travel_&_adventure',
+    '18': 'youth_&_student_life'
+}
+tasks = ["nerd", "sentiment", "topic"]
+splits = ["test_1", "test_2", "test_3", "test_4"]
+model_list = [
+    "roberta-base",
+    "bertweet-base",
+    "bernice",
+    "roberta-large",
+    "bertweet-large",
+    "twitter-roberta-base-2019-90m",
+    "twitter-roberta-base-dec2020",
+    "twitter-roberta-base-2021-124m",
+    "twitter-roberta-base-2022-154m",
+    "twitter-roberta-large-2022-154m"
+]
+references = {}
+
+# Collect gold labels for every task/split once, so all models are scored
+# against the same references. Note: the sentiment references come from the
+# "sentiment_temporal" config while the predictions below use the
+# "sentiment_small_temporal" splits; the test splits are assumed to coincide.
+for task in tasks:
+    references[task] = {}
+    for s in splits:
+        data = load_dataset("tweettemposhift/tweet_temporal_shift", f"{task}_temporal", split=s)
+        if task in ["nerd", "sentiment"]:
+            references[task][s] = [str(i) for i in data['gold_label_binary']]
+        else:
+            references[task][s] = [{id_to_label[str(n)] for n, k in enumerate(i) if k == 1} for i in data['gold_label_list']]
+
+os.makedirs("experiments/analysis", exist_ok=True)
+
+# Topic classification: per instance, flag (model, seed) pairs where the
+# temporal-split model errs while the random-split model is correct.
+output = {}
+for model_m in model_list:
+    flags = []
+    for s in splits:
+        with open(f"{root_dir}/topic-topic_temporal-{model_m}/{s}.jsonl") as f:
+            pred = [set(json.loads(i)["label"]) for i in f.read().split('\n') if len(i)]
+        flags += [a == b for a, b in zip(references["topic"][s], pred)]
+    count = {}
+    for seed_s in range(3):
+        flags_rand = []
+        for random_r in range(4):
+            with open(f"{root_dir}/topic-topic_random{random_r}_seed{seed_s}-{model_m}/test_{random_r + 1}.jsonl") as f:
+                pred = [set(json.loads(i)["label"]) for i in f.read().split('\n') if len(i)]
+            flags_rand += [a == b for a, b in zip(references["topic"][f"test_{random_r + 1}"], pred)]
+        count[f"{model_m}_{seed_s}"] = [not x and y for x, y in zip(flags, flags_rand)]
+    output[model_m] = pd.DataFrame(count).sum(1)
+
+df_main = []
+for s in splits:
+    df_main.append(load_dataset("tweettemposhift/tweet_temporal_shift", "topic_temporal", split=s).to_pandas())
+df_main = pd.concat(df_main)
+df_main["error_count"] = pd.DataFrame(output).sum(1).values
+df_main["gold_label_list"] = [", ".join([id_to_label[str(n)] for n, k in enumerate(i) if k == 1]) for i in df_main['gold_label_list']]
+df_main.sort_values("error_count", ascending=False).to_csv("experiments/analysis/topic.csv")
+
+
+# NERD: same analysis with binary string labels.
+output = {}
+for model_m in model_list:
+    flags = []
+    for s in splits:
+        with open(f"{root_dir}/nerd-nerd_temporal-{model_m}/{s}.jsonl") as f:
+            pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
+        flags += [a == b for a, b in zip(references["nerd"][s], pred)]
+    count = {}
+    for seed_s in range(3):
+        flags_rand = []
+        for random_r in range(4):
+            with open(f"{root_dir}/nerd-nerd_random{random_r}_seed{seed_s}-{model_m}/test_{random_r + 1}.jsonl") as f:
+                pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
+            flags_rand += [a == b for a, b in zip(references["nerd"][f"test_{random_r + 1}"], pred)]
+        count[f"{model_m}_{seed_s}"] = [not x and y for x, y in zip(flags, flags_rand)]
+    output[model_m] = pd.DataFrame(count).sum(1)
+
+df_main = []
+for s in splits:
+    df_main.append(load_dataset("tweettemposhift/tweet_temporal_shift", "nerd_temporal", split=s).to_pandas())
+df_main = pd.concat(df_main)
+df_main["error_count"] = pd.DataFrame(output).sum(1).values
+df_main.sort_values("error_count", ascending=False).to_csv("experiments/analysis/nerd.csv")
+
+
+# Sentiment: same analysis on the sentiment_small splits.
+output = {}
+for model_m in model_list:
+    flags = []
+    for s in splits:
+        with open(f"{root_dir}/sentiment-sentiment_small_temporal-{model_m}/{s}.jsonl") as f:
+            pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
+        flags += [a == b for a, b in zip(references["sentiment"][s], pred)]
+    count = {}
+    for seed_s in range(3):
+        flags_rand = []
+        for random_r in range(4):
+            with open(f"{root_dir}/sentiment-sentiment_small_random{random_r}_seed{seed_s}-{model_m}/test_{random_r + 1}.jsonl") as f:
+                pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
+            flags_rand += [a == b for a, b in zip(references["sentiment"][f"test_{random_r + 1}"], pred)]
+        count[f"{model_m}_{seed_s}"] = [not x and y for x, y in zip(flags, flags_rand)]
+    output[model_m] = pd.DataFrame(count).sum(1)
+
+df_main = []
+for s in splits:
+    df_main.append(load_dataset("tweettemposhift/tweet_temporal_shift", "sentiment_small_temporal", split=s).to_pandas())
+df_main = pd.concat(df_main)
+df_main["error_count"] = pd.DataFrame(output).sum(1).values
+df_main.sort_values("error_count", ascending=False).to_csv("experiments/analysis/sentiment.csv")
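
For each test instance, "error_count" sums, over the ten models and three seeds, the cases where the temporally trained model is wrong while the corresponding random-split model is right; the CSVs above are sorted by that count. A minimal sketch of inspecting the output (assuming only pandas, the column names written by the script above, and that the dataset's "text" column survives to_pandas):

import pandas as pd

# The unnamed first column is the pandas index written by to_csv;
# "error_count" is the column added by the analysis script.
df = pd.read_csv("experiments/analysis/topic.csv", index_col=0)

# The file is already sorted descending, so head() shows the instances
# most often missed only under the temporal split.
print(df[["text", "gold_label_list", "error_count"]].head(10))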
experiments/model_predict_classifier.py CHANGED
@@ -113,6 +113,8 @@ class TopicClassification(Classifier):
     def get_prediction(self, export_dir: str, batch_size: int):
         os.makedirs(export_dir, exist_ok=True)
         for test_split in ["test_1", "test_2", "test_3", "test_4"]:
+            if os.path.exists(f"{export_dir}/{test_split}.jsonl"):
+                continue
             data = self.dataset[test_split]
             predictions = self.predict(data["text"], batch_size)
             with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
@@ -130,6 +132,8 @@ class SentimentClassification(Classifier):
     def get_prediction(self, export_dir: str, batch_size: int):
         os.makedirs(export_dir, exist_ok=True)
         for test_split in ["test_1", "test_2", "test_3", "test_4"]:
+            if os.path.exists(f"{export_dir}/{test_split}.jsonl"):
+                continue
             data = self.dataset[test_split]
             predictions = self.predict(data["text"], batch_size)
             with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
@@ -147,6 +151,8 @@ class NERDClassification(Classifier):
     def get_prediction(self, export_dir: str, batch_size: int):
         os.makedirs(export_dir, exist_ok=True)
         for test_split in ["test_1", "test_2", "test_3", "test_4"]:
+            if os.path.exists(f"{export_dir}/{test_split}.jsonl"):
+                continue
             data = self.dataset[test_split]
             text = [
                 f"{d['target']} {self.tokenizer.sep_token} {d['definition']} {self.tokenizer.sep_token} {d['text']}"
experiments/model_predict_ner.py CHANGED
@@ -1,109 +1,110 @@
-import re
-import os
-import torch
-import json
-from typing import Dict, List
-from transformers import AutoTokenizer, AutoModelForTokenClassification, AutoConfig
-from datasets import load_dataset
-
-URL_RE = re.compile(r"https?:\/\/[\w\.\/\?\=\d&#%_:/-]+")
-HANDLE_RE = re.compile(r"@\w+")
-
-
-def preprocess_bernice(text):
-    text = HANDLE_RE.sub("@USER", text)
-    text = URL_RE.sub("HTTPURL", text)
-    return text
-
-
-def preprocess_timelm(text):
-    text = HANDLE_RE.sub("@user", text)
-    text = URL_RE.sub("http", text)
-    return text
-
-
-def preprocess(model_name, text):
-    if model_name == "jhu-clsp/bernice":
-        return preprocess_bernice(text)
-    if "twitter-roberta-base" in model_name:
-        return preprocess_timelm(text)
-    return text
-
-
-class NER:
-
-    def __init__(self, model_name: str, max_length: int, id_to_label: Dict[str, str]):
-        self.model_name = model_name
-        self.config = AutoConfig.from_pretrained(self.model_name)
-        self.model = AutoModelForTokenClassification.from_pretrained(self.model_name, config=self.config)
-        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-        self.max_length = max_length
-        self.id_to_label = id_to_label
-        # GPU setup (https://github.com/cardiffnlp/tweetnlp/issues/15)
-        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
-            self.device = torch.device('cuda')
-        elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available() and torch.backends.mps.is_built():
-            self.device = torch.device("mps")
-        else:
-            self.device = torch.device('cpu')
-        self.parallel = torch.cuda.device_count() > 1
-        if self.parallel:
-            self.model = torch.nn.DataParallel(self.model)
-        self.model.to(self.device)
-        self.model.eval()
-        self.dataset = load_dataset("tweettemposhift/tweet_temporal_shift", "ner_temporal")
-
-    def get_prediction(self, export_dir: str, batch_size: int):
-        os.makedirs(export_dir, exist_ok=True)
-        for test_split in ["test_1", "test_2", "test_3", "test_4"]:
-            data = self.dataset[test_split]
-            predictions = self.predict(data["text"], batch_size)
-            with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
-                f.write("\n".join([json.dumps(i) for i in predictions]))
-
-        with open(export_dir, "w") as f:
-            predictions = self.predict(self.dataset[], batch_size)
-            for i in :
-                f.write(json.dumps(i) + "\n")
-
-    def predict(self, text: List[str], batch_size: int):
-        text = [[preprocess(self.model_name, t) for t in i] for i in text]
-        indices = list(range(0, len(text), batch_size)) + [len(text) + 1]
-        inputs = []
-        preds = []
-        with torch.no_grad():
-            for i in range(len(indices) - 1):
-                encoded_input = self.tokenizer.batch_encode_plus(
-                    text[indices[i]: indices[i + 1]],
-                    max_length=self.max_length,
-                    return_tensors='pt',
-                    padding=True,
-                    truncation=True)
-                inputs += encoded_input['input_ids'].cpu().detach().int().tolist()
-                output = self.model(**{k: v.to(self.device) for k, v in encoded_input.items()})
-                prob = torch.softmax(output['logits'], dim=-1).cpu().detach().float().tolist()
-                pred = torch.max(prob, dim=-1)[1].cpu().detach().int().tolist()
-                preds += [[self.id_to_label[_p] for _p in p] for p in pred]
-        return [{"label": p, "input_id": i} for p, i in zip(preds, inputs)]
-
-
-if __name__ == '__main__':
-    model_list = [
-        "roberta-base",
-        "bertweet-base",
-        "bernice",
-        "roberta-large",
-        "bertweet-large",
-        "twitter-roberta-base-2019-90m",
-        "twitter-roberta-base-dec2020",
-        "twitter-roberta-base-2021-124m",
-        "twitter-roberta-base-2022-154m",
-        "twitter-roberta-large-2022-154m"
-    ]
-    for model_m in model_list:
-        alias = f"tweettemposhift/ner-ner_temporal-{model_m}"
-        NER(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
-        for random_r in range(4):
-            for seed_s in range(3):
-                alias = f"tweettemposhift/ner-ner_random{random_r}_seed{seed_s}-{model_m}"
-                TopicClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
+# #WIP
+# import re
+# import os
+# import torch
+# import json
+# from typing import Dict, List
+# from transformers import AutoTokenizer, AutoModelForTokenClassification, AutoConfig
+# from datasets import load_dataset
+#
+# URL_RE = re.compile(r"https?:\/\/[\w\.\/\?\=\d&#%_:/-]+")
+# HANDLE_RE = re.compile(r"@\w+")
+#
+#
+# def preprocess_bernice(text):
+#     text = HANDLE_RE.sub("@USER", text)
+#     text = URL_RE.sub("HTTPURL", text)
+#     return text
+#
+#
+# def preprocess_timelm(text):
+#     text = HANDLE_RE.sub("@user", text)
+#     text = URL_RE.sub("http", text)
+#     return text
+#
+#
+# def preprocess(model_name, text):
+#     if model_name == "jhu-clsp/bernice":
+#         return preprocess_bernice(text)
+#     if "twitter-roberta-base" in model_name:
+#         return preprocess_timelm(text)
+#     return text
+#
+#
+# class NER:
+#
+#     def __init__(self, model_name: str, max_length: int, id_to_label: Dict[str, str]):
+#         self.model_name = model_name
+#         self.config = AutoConfig.from_pretrained(self.model_name)
+#         self.model = AutoModelForTokenClassification.from_pretrained(self.model_name, config=self.config)
+#         self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+#         self.max_length = max_length
+#         self.id_to_label = id_to_label
+#         # GPU setup (https://github.com/cardiffnlp/tweetnlp/issues/15)
+#         if torch.cuda.is_available() and torch.cuda.device_count() > 0:
+#             self.device = torch.device('cuda')
+#         elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available() and torch.backends.mps.is_built():
+#             self.device = torch.device("mps")
+#         else:
+#             self.device = torch.device('cpu')
+#         self.parallel = torch.cuda.device_count() > 1
+#         if self.parallel:
+#             self.model = torch.nn.DataParallel(self.model)
+#         self.model.to(self.device)
+#         self.model.eval()
+#         self.dataset = load_dataset("tweettemposhift/tweet_temporal_shift", "ner_temporal")
+#
+#     def get_prediction(self, export_dir: str, batch_size: int):
+#         os.makedirs(export_dir, exist_ok=True)
+#         for test_split in ["test_1", "test_2", "test_3", "test_4"]:
+#             data = self.dataset[test_split]
+#             predictions = self.predict(data["text"], batch_size)
+#             with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
+#                 f.write("\n".join([json.dumps(i) for i in predictions]))
+#
+#         with open(export_dir, "w") as f:
+#             predictions = self.predict(self.dataset[], batch_size)
+#             for i in :
+#                 f.write(json.dumps(i) + "\n")
+#
+#     def predict(self, text: List[str], batch_size: int):
+#         text = [[preprocess(self.model_name, t) for t in i] for i in text]
+#         indices = list(range(0, len(text), batch_size)) + [len(text) + 1]
+#         inputs = []
+#         preds = []
+#         with torch.no_grad():
+#             for i in range(len(indices) - 1):
+#                 encoded_input = self.tokenizer.batch_encode_plus(
+#                     text[indices[i]: indices[i + 1]],
+#                     max_length=self.max_length,
+#                     return_tensors='pt',
+#                     padding=True,
+#                     truncation=True)
+#                 inputs += encoded_input['input_ids'].cpu().detach().int().tolist()
+#                 output = self.model(**{k: v.to(self.device) for k, v in encoded_input.items()})
+#                 prob = torch.softmax(output['logits'], dim=-1).cpu().detach().float().tolist()
+#                 pred = torch.max(prob, dim=-1)[1].cpu().detach().int().tolist()
+#                 preds += [[self.id_to_label[_p] for _p in p] for p in pred]
+#         return [{"label": p, "input_id": i} for p, i in zip(preds, inputs)]
+#
+#
+# if __name__ == '__main__':
+#     model_list = [
+#         "roberta-base",
+#         "bertweet-base",
+#         "bernice",
+#         "roberta-large",
+#         "bertweet-large",
+#         "twitter-roberta-base-2019-90m",
+#         "twitter-roberta-base-dec2020",
+#         "twitter-roberta-base-2021-124m",
+#         "twitter-roberta-base-2022-154m",
+#         "twitter-roberta-large-2022-154m"
+#     ]
+#     for model_m in model_list:
+#         alias = f"tweettemposhift/ner-ner_temporal-{model_m}"
+#         NER(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
+#         for random_r in range(4):
+#             for seed_s in range(3):
+#                 alias = f"tweettemposhift/ner-ner_random{random_r}_seed{seed_s}-{model_m}"
+#                 TopicClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=32)
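
The NER script is commented out wholesale as work in progress; the draft still has open issues (the dangling `self.dataset[]` / `for i in :` fragment, `torch.max` applied to a Python list after `.tolist()`, and `TopicClassification` where `NER` is presumably intended). A minimal sketch of the per-token decoding step with the argmax taken while the logits are still a tensor (illustrative only; it assumes integer keys in `id_to_label`, unlike the draft's `Dict[str, str]` annotation):

import torch
from typing import Dict, List

def decode_token_labels(logits: torch.Tensor, id_to_label: Dict[int, str]) -> List[List[str]]:
    # logits: (batch, seq_len, num_labels). Argmax on the tensor, then map
    # each per-token label id to its string label.
    pred = torch.argmax(logits, dim=-1).cpu().tolist()
    return [[id_to_label[p] for p in seq] for seq in pred]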
experiments/prediction.py DELETED
@@ -1,13 +0,0 @@
-from transformers import pipeline
-
-pipe = pipeline(model="tweettemposhift/nerd-nerd_random1_seed2-twitter-roberta-base-2019-90m")
-out = pipe("This restaurant is awesome")
-
-pipe = pipeline(model="tweettemposhift/sentiment-sentiment_small_random3_seed2-twitter-roberta-base-dec2020")
-pipe("This restaurant is awesome")
-
-pipe = pipeline(model="tweettemposhift/topic-topic_random3_seed2-twitter-roberta-base-dec2020")
-pipe("This restaurant is awesome")
-
-pipe = pipeline(model="tweettemposhift/ner-ner_random1_seed2-twitter-roberta-base-2019-90m")
-pipe("This restaurant is awesome")