add chinese

Files changed:
- experiments/huggingface_ops.py (+3 -1)
- experiments/main.sh (+1 -30)
- experiments/model_predict_classifier.py (+74 -16)
experiments/huggingface_ops.py

@@ -3,8 +3,10 @@ from pprint import pprint
 
 api = HfApi()
 models = api.list_models(filter=ModelFilter(author='tweettemposhift'))
-models_filtered = [i.modelId for i in models if '
+models_filtered = [i.modelId for i in models if 'emoji' in i.modelId]
 pprint(sorted(models_filtered))
+for i in models_filtered:
+    api.delete_repo(i, repo_type="model")
 # models = api.list_models(filter=ModelFilter(author='tweettemposhift'))
 # models_filtered = [i.modelId for i in models if 'topic-' in i.modelId]
 # pprint(sorted([i for i in models_filtered if i.endswith('twitter-roberta-base-2019-90m')]))
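Since api.delete_repo is irreversible, a dry-run pass over the filtered list is a cheap safety net before running the loop above. A minimal sketch using the same huggingface_hub calls as the script; the DRY_RUN flag is an illustrative addition, not part of the commit:

from pprint import pprint

from huggingface_hub import HfApi, ModelFilter

DRY_RUN = True  # flip to False only after reviewing the printed list

api = HfApi()
models = api.list_models(filter=ModelFilter(author="tweettemposhift"))
targets = sorted(i.modelId for i in models if "emoji" in i.modelId)
pprint(targets)

for repo_id in targets:
    if DRY_RUN:
        print(f"would delete: {repo_id}")
    else:
        # permanently removes the model repository from the Hub
        api.delete_repo(repo_id, repo_type="model")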
experiments/main.sh

@@ -5,70 +5,41 @@ MODEL="jhu-clsp/bernice"
 MODEL="roberta-large"
 MODEL="vinai/bertweet-large"
 MODEL="cardiffnlp/twitter-roberta-base-2019-90m"
-
 MODEL="cardiffnlp/twitter-roberta-base-dec2020"
 MODEL="cardiffnlp/twitter-roberta-base-2021-124m"
-MODEL="cardiffnlp/twitter-roberta-base-2022-154m"
-
 MODEL="cardiffnlp/twitter-roberta-large-2022-154m"
-
+MODEL="cardiffnlp/twitter-roberta-base-2022-154m"
 
 # EMOJI
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_temporal"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random0_seed0"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random1_seed0"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random2_seed0"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random3_seed0"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random0_seed1"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random1_seed1"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random2_seed1"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random3_seed1"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random0_seed2"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random1_seed2"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random2_seed2"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 python model_finetuning_emoji.py -m "${MODEL}" -d "emoji_random3_seed2"
-rm -rf "ckpt/emoji*${MODEL##*/}"
 
 
 # HATE
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_temporal"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random0_seed0"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random1_seed0"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random2_seed0"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random3_seed0"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random0_seed1"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random1_seed1"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random2_seed1"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random3_seed1"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random0_seed2"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random1_seed2"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random2_seed2"
-rm -rf "ckpt/hate*${MODEL##*/}"
 python model_finetuning_hate.py -m "${MODEL}" -d "hate_random3_seed2"
-rm -rf "ckpt/hate*${MODEL##*/}"
 
 
 # SENTIMENT
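The EMOJI and HATE blocks are the same 13-run pattern: one temporal split, then random0 through random3 for each of seed0 through seed2. If the list ever needs regenerating rather than hand-editing, a short Python sketch (illustrative, not part of the repo) prints the same commands in the same order:

# Regenerate the flattened run lists in main.sh: one "temporal" split,
# then random0-3 within each of seed0-2, per task.
for task in ("emoji", "hate"):
    splits = [f"{task}_temporal"] + [
        f"{task}_random{r}_seed{s}" for s in range(3) for r in range(4)
    ]
    for split in splits:
        print(f'python model_finetuning_{task}.py -m "${{MODEL}}" -d "{split}"')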
experiments/model_predict_classifier.py

@@ -140,6 +140,44 @@ class SentimentClassification(Classifier):
                 f.write("\n".join([json.dumps(i) for i in predictions]))
 
 
+class HateClassification(Classifier):
+
+    id_to_label = {'0': '0', '1': '1'}
+
+    def __init__(self, model_name: str):
+        super().__init__(model_name, max_length=128, multi_label=False, id_to_label=self.id_to_label)
+        self.dataset = load_dataset("tweettemposhift/tweet_temporal_shift", "hate_temporal")
+
+    def get_prediction(self, export_dir: str, batch_size: int):
+        os.makedirs(export_dir, exist_ok=True)
+        for test_split in ["test_1", "test_2", "test_3", "test_4"]:
+            if os.path.exists(f"{export_dir}/{test_split}.jsonl"):
+                continue
+            data = self.dataset[test_split]
+            predictions = self.predict(data["text"], batch_size)
+            with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
+                f.write("\n".join([json.dumps(i) for i in predictions]))
+
+
+class EmojiClassification(Classifier):
+
+    def __init__(self, model_name: str):
+        self.dataset = load_dataset("tweettemposhift/tweet_temporal_shift", "hate_temporal")
+        id_to_label = dict(enumerate(self.dataset["test"].features["gold_label"].names))
+        super().__init__(model_name, max_length=128, multi_label=False, id_to_label=id_to_label)
+
+    def get_prediction(self, export_dir: str, batch_size: int):
+        os.makedirs(export_dir, exist_ok=True)
+        for test_split in ["test_1", "test_2", "test_3", "test_4"]:
+            if os.path.exists(f"{export_dir}/{test_split}.jsonl"):
+                continue
+            data = self.dataset[test_split]
+            predictions = self.predict(data["text"], batch_size)
+            with open(f"{export_dir}/{test_split}.jsonl", "w") as f:
+                f.write("\n".join([json.dumps(i) for i in predictions]))
+
+
+
 class NERDClassification(Classifier):
 
     id_to_label = {'0': '0', '1': '1'}
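Both new classes delegate batching and label mapping to the shared Classifier base, which sits earlier in this file and is not part of the diff. For orientation, a rough sketch of what that base presumably provides; every name and detail below is an assumption for illustration, not the repository's actual implementation:

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer


class Classifier:
    """Hypothetical sketch of the shared base class assumed by the hunk
    above; the real one is defined earlier in this file and may differ."""

    def __init__(self, model_name, max_length, multi_label, id_to_label):
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
        self.model.eval()
        self.max_length = max_length
        self.multi_label = multi_label  # multi-label handling omitted in this sketch
        self.id_to_label = id_to_label

    def predict(self, texts, batch_size):
        predictions = []
        for start in range(0, len(texts), batch_size):
            batch = self.tokenizer(
                texts[start:start + batch_size],
                max_length=self.max_length,
                padding=True,
                truncation=True,
                return_tensors="pt",
            )
            with torch.no_grad():
                logits = self.model(**batch).logits
            for idx in logits.argmax(dim=-1).tolist():
                # the subclasses mix int keys (EmojiClassification) and
                # str keys (HateClassification) in id_to_label, so try both
                label = self.id_to_label.get(idx, self.id_to_label.get(str(idx)))
                predictions.append({"label": label})
        return predictions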
@@ -178,30 +216,50 @@ if __name__ == '__main__':
     ]
     for model_m in model_list:
         alias = f"tweettemposhift/topic-topic_temporal-{model_m}"
-        TopicClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
+        HateClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
         torch.cuda.empty_cache()
         for random_r in range(4):
             for seed_s in range(3):
-                alias = f"tweettemposhift/topic-topic_random{random_r}_seed{seed_s}-{model_m}"
+                alias = f"tweettemposhift/hate-hate_random{random_r}_seed{seed_s}-{model_m}"
                 TopicClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
                 torch.cuda.empty_cache()
 
     for model_m in model_list:
-        alias = f"tweettemposhift/sentiment-sentiment_small_temporal-{model_m}"
-        SentimentClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
+        alias = f"tweettemposhift/emoji-emoji_temporal-{model_m}"
+        EmojiClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
         torch.cuda.empty_cache()
         for random_r in range(4):
             for seed_s in range(3):
-                alias = f"tweettemposhift/sentiment-sentiment_small_random{random_r}_seed{seed_s}-{model_m}"
-                SentimentClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
+                alias = f"tweettemposhift/emoji-emoji_random{random_r}_seed{seed_s}-{model_m}"
+                TopicClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
-                torch.cuda.empty_cache()
-
-    for model_m in model_list:
-        alias = f"tweettemposhift/nerd-nerd_temporal-{model_m}"
-        NERDClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
-        torch.cuda.empty_cache()
-        for random_r in range(4):
-            for seed_s in range(3):
-                alias = f"tweettemposhift/nerd-nerd_random{random_r}_seed{seed_s}-{model_m}"
-                NERDClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
                 torch.cuda.empty_cache()
+    #
+    # for model_m in model_list:
+    #     alias = f"tweettemposhift/topic-topic_temporal-{model_m}"
+    #     TopicClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
+    #     torch.cuda.empty_cache()
+    #     for random_r in range(4):
+    #         for seed_s in range(3):
+    #             alias = f"tweettemposhift/topic-topic_random{random_r}_seed{seed_s}-{model_m}"
+    #             TopicClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
+    #             torch.cuda.empty_cache()
+    #
+    # for model_m in model_list:
+    #     alias = f"tweettemposhift/sentiment-sentiment_small_temporal-{model_m}"
+    #     SentimentClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
+    #     torch.cuda.empty_cache()
+    #     for random_r in range(4):
+    #         for seed_s in range(3):
+    #             alias = f"tweettemposhift/sentiment-sentiment_small_random{random_r}_seed{seed_s}-{model_m}"
+    #             SentimentClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
+    #             torch.cuda.empty_cache()
+    #
+    # for model_m in model_list:
+    #     alias = f"tweettemposhift/nerd-nerd_temporal-{model_m}"
+    #     NERDClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
+    #     torch.cuda.empty_cache()
+    #     for random_r in range(4):
+    #         for seed_s in range(3):
+    #             alias = f"tweettemposhift/nerd-nerd_random{random_r}_seed{seed_s}-{model_m}"
+    #             NERDClassification(alias).get_prediction(export_dir=f"prediction_files/{os.path.basename(alias)}", batch_size=512)
+    #             torch.cuda.empty_cache()
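Each get_prediction call skips splits that already have an output file, then writes one JSONL file per test split (test_1 through test_4), one JSON object per prediction. A hypothetical single-alias invocation, with a repo name assembled from the same pattern the loops use (illustrative, not taken from the commit):

import os

# One-off run mirroring a single iteration of the driver loop above.
alias = "tweettemposhift/hate-hate_random0_seed0-twitter-roberta-base-2019-90m"
HateClassification(alias).get_prediction(
    export_dir=f"prediction_files/{os.path.basename(alias)}",
    batch_size=512,
)
# writes prediction_files/hate-hate_random0_seed0-.../test_1.jsonl ... test_4.jsonl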