asahi417 committed
Commit 253c4c9 · Parent(s): 0f52549
experiments/huggingface_ops.py CHANGED
@@ -3,7 +3,7 @@ from pprint import pprint
 
 api = HfApi()
 models = api.list_models(filter=ModelFilter(author='vocabtrimmer'))
-models_filtered = [i.modelId for i in models if 'qg-' in i.modelId]
+models_filtered = [i.modelId for i in models if 'cardiffnlp/twitter-roberta-base-jun2020' in i.modelId]
 pprint(sorted(models_filtered))
 # models = api.list_models(filter=ModelFilter(author='tweettemposhift'))
 # models_filtered = [i.modelId for i in models if 'topic-' in i.modelId]
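The filter change above narrows the `vocabtrimmer` listing from question-generation repos (`'qg-'`) to trimmed variants of the `cardiffnlp/twitter-roberta-base-jun2020` checkpoint. For reference, a minimal sketch of the same listing against a newer `huggingface_hub`, where `ModelFilter` is deprecated and `list_models()` takes `author=` directly (the exact version cutoff is an assumption; the script above targets the older API):

```python
from pprint import pprint
from huggingface_hub import HfApi  # newer releases drop ModelFilter

api = HfApi()
# author= replaces ModelFilter(author=...) in recent huggingface_hub releases
models = api.list_models(author="vocabtrimmer")
# keep only repos derived from the jun2020 base checkpoint
filtered = [m.id for m in models if "cardiffnlp/twitter-roberta-base-jun2020" in m.id]
pprint(sorted(filtered))
```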
experiments/main.sh CHANGED
@@ -10,12 +10,6 @@ MODEL="cardiffnlp/twitter-roberta-base-2021-124m"
 MODEL="cardiffnlp/twitter-roberta-base-2022-154m"
 MODEL="cardiffnlp/twitter-roberta-large-2022-154m"
 
-# ABLATION (TimeLMs)
-## Topic & NER
-MODEL="cardiffnlp/twitter-roberta-base-jun2020"
-MODEL="cardiffnlp/twitter-roberta-base-sep2021"
-## NERD
-MODEL="cardiffnlp/twitter-roberta-base-jun2021"
 
 # SENTIMENT
 python model_finetuning_sentiment.py -m "${MODEL}" -d "sentiment_small_temporal"
@@ -78,34 +72,49 @@ python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed2"
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed2"
 
 download () {
-git clone "https://huggingface.co/tweettemposhift/ner-${1}-${MODEL##*/}"
-mkdir "ckpt/ner-${1}-${MODEL##*/}"
-mv "ner-${1}-${MODEL##*/}" "ckpt/ner-${1}-${MODEL##*/}/"
-mv "ckpt/ner-${1}-${MODEL##*/}/ner-${1}-${MODEL##*/}" "ckpt/ner-${1}-${MODEL##*/}/best_model"
+git clone "https://huggingface.co/tweettemposhift/ner-${1}-${2##*/}"
+mkdir "ckpt/ner-${1}-${2##*/}"
+mv "ner-${1}-${2##*/}" "ckpt/ner-${1}-${2##*/}/"
+mv "ckpt/ner-${1}-${2##*/}/ner-${1}-${2##*/}" "ckpt/ner-${1}-${2##*/}/best_model"
 }
-download "ner_temporal"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_temporal" --skip-train
-download "ner_random0_seed0"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed0" --skip-train
-download "ner_random1_seed0"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed0" --skip-train
-download "ner_random2_seed0"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed0" --skip-train
-download "ner_random3_seed0"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed0" --skip-train
-download "ner_random0_seed1"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed1" --skip-train
-download "ner_random1_seed1"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed1" --skip-train
-download "ner_random2_seed1"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed1" --skip-train
-download "ner_random3_seed1"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed1" --skip-train
-download "ner_random0_seed2"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed2" --skip-train
-download "ner_random1_seed2"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed2" --skip-train
-download "ner_random2_seed2"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed2" --skip-train
-download "ner_random3_seed2"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed2" --skip-train
+
+fix () {
+download "ner_temporal" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_temporal" --skip-train
+download "ner_random0_seed0" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random0_seed0" --skip-train
+download "ner_random1_seed0" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random1_seed0" --skip-train
+download "ner_random2_seed0" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random2_seed0" --skip-train
+download "ner_random3_seed0" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random3_seed0" --skip-train
+download "ner_random0_seed1" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random0_seed1" --skip-train
+download "ner_random1_seed1" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random1_seed1" --skip-train
+download "ner_random2_seed1" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random2_seed1" --skip-train
+download "ner_random3_seed1" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random3_seed1" --skip-train
+download "ner_random0_seed2" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random0_seed2" --skip-train
+download "ner_random1_seed2" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random1_seed2" --skip-train
+download "ner_random2_seed2" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random2_seed2" --skip-train
+download "ner_random3_seed2" "${1}"
+python model_finetuning_ner.py -m "${1}" -d "ner_random3_seed2" --skip-train
+}
+
+fix "roberta-base"
+fix "vinai/bertweet-base"
+fix "jhu-clsp/bernice"
+fix "roberta-large"
+fix "vinai/bertweet-large"
+fix "cardiffnlp/twitter-roberta-base-2019-90m"
+fix "cardiffnlp/twitter-roberta-base-dec2020"
+fix "cardiffnlp/twitter-roberta-base-2021-124m"
+fix "cardiffnlp/twitter-roberta-base-2022-154m"
+fix "cardiffnlp/twitter-roberta-large-2022-154m"
+
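The refactor above drops the TimeLMs ablation `MODEL=` overrides and parameterizes the checkpoint: `download` now receives the model as `$2` instead of reading the global `MODEL`, and the new `fix` wrapper replays the download-and-evaluate loop once per backbone. The `${2##*/}` expansion strips the longest prefix matching `*/`, i.e. the Hub namespace, so checkpoint directories stay flat on disk. A small sketch of that expansion (the `repo_name` helper is illustrative, not part of the script):

```bash
#!/usr/bin/env bash
# ${1##*/} removes the longest prefix matching "*/": the Hub namespace.
repo_name () {
  echo "${1##*/}"
}
repo_name "cardiffnlp/twitter-roberta-base-2019-90m"  # -> twitter-roberta-base-2019-90m
repo_name "roberta-base"                              # no slash: value unchanged
```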
experiments/model_finetuning_ner.py CHANGED
@@ -21,6 +21,8 @@ from transformers import AutoTokenizer, AutoModelForTokenClassification, Trainin
 from huggingface_hub import Repository
 
 logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
+os.environ["WANDB_DISABLED"] = "true"
+
 EVAL_STEP = 500
 RANDOM_SEED = 42
 N_TRIALS = 10
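Setting `WANDB_DISABLED=true` before the `Trainer` is constructed tells the `transformers` logging integration to skip Weights & Biases entirely; the same two lines are added to the NERD and sentiment scripts below (this assumes the scripts already import `os`). An equivalent per-run control, shown as a minimal sketch with a hypothetical output directory:

```python
from transformers import TrainingArguments

# report_to="none" disables all logging integrations, wandb included,
# without touching the process environment.
args = TrainingArguments(output_dir="ckpt/example", report_to="none")
```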
experiments/model_finetuning_nerd.py CHANGED
@@ -21,6 +21,8 @@ from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trai
 from huggingface_hub import Repository
 
 logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
+os.environ["WANDB_DISABLED"] = "true"
+
 EVAL_STEP = 500
 RANDOM_SEED = 42
 N_TRIALS = 10
experiments/model_finetuning_sentiment.py CHANGED
@@ -21,6 +21,8 @@ from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trai
 from huggingface_hub import Repository
 
 logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
+os.environ["WANDB_DISABLED"] = "true"
+
 EVAL_STEP = 500
 RANDOM_SEED = 42
 N_TRIALS = 10
experiments/requirements.txt CHANGED
@@ -10,4 +10,5 @@ accelerate==0.23.0
 evaluate==0.4.1
 sentencepiece==0.1.99
 protobuf==4.24.4
-seqeval==1.2.2
+seqeval==1.2.2
+wandb==0.16.0
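With `wandb` now pinned (the `seqeval` line is unchanged apart from, presumably, a trailing newline), the experiment environment installs in one step:

```bash
pip install -r experiments/requirements.txt
```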