init
Browse files
experiments/model_finetuning_emoji.py
CHANGED
@@ -95,7 +95,7 @@ def main(
|
|
95 |
validate_index = validate_index[:N_VALIDATE_SIZE]
|
96 |
|
97 |
trainer = Trainer(
|
98 |
-
model=AutoModelForSequenceClassification.from_pretrained(model, num_labels=100),
|
99 |
args=TrainingArguments(
|
100 |
output_dir=output_dir,
|
101 |
evaluation_strategy="steps",
|
@@ -106,7 +106,7 @@ def main(
|
|
106 |
eval_dataset=tokenized_datasets["validation"].select(validate_index),
|
107 |
compute_metrics=compute_metric,
|
108 |
model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
|
109 |
-
model, return_dict=True, num_labels=100,
|
110 |
)
|
111 |
)
|
112 |
|
@@ -141,7 +141,7 @@ def main(
|
|
141 |
metric = {}
|
142 |
for single_test in test_split:
|
143 |
trainer = Trainer(
|
144 |
-
model=AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=100),
|
145 |
args=TrainingArguments(
|
146 |
output_dir=output_dir,
|
147 |
evaluation_strategy="no",
|
@@ -159,7 +159,7 @@ def main(
|
|
159 |
if not skip_upload:
|
160 |
logging.info("uploading to huggingface")
|
161 |
model_organization = "tweettemposhift"
|
162 |
-
model_instance = AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=100)
|
163 |
model_instance.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
|
164 |
tokenizer.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
|
165 |
repo = Repository(model_alias, f"{model_organization}/{model_alias}")
|
|
|
95 |
validate_index = validate_index[:N_VALIDATE_SIZE]
|
96 |
|
97 |
trainer = Trainer(
|
98 |
+
model=AutoModelForSequenceClassification.from_pretrained(model, num_labels=99),
|
99 |
args=TrainingArguments(
|
100 |
output_dir=output_dir,
|
101 |
evaluation_strategy="steps",
|
|
|
106 |
eval_dataset=tokenized_datasets["validation"].select(validate_index),
|
107 |
compute_metrics=compute_metric,
|
108 |
model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
|
109 |
+
model, return_dict=True, num_labels=99,
|
110 |
)
|
111 |
)
|
112 |
|
|
|
141 |
metric = {}
|
142 |
for single_test in test_split:
|
143 |
trainer = Trainer(
|
144 |
+
model=AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=99),
|
145 |
args=TrainingArguments(
|
146 |
output_dir=output_dir,
|
147 |
evaluation_strategy="no",
|
|
|
159 |
if not skip_upload:
|
160 |
logging.info("uploading to huggingface")
|
161 |
model_organization = "tweettemposhift"
|
162 |
+
model_instance = AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=99)
|
163 |
model_instance.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
|
164 |
tokenizer.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
|
165 |
repo = Repository(model_alias, f"{model_organization}/{model_alias}")
|
tweet_temporal_shift.py
CHANGED
@@ -2,7 +2,7 @@
|
|
2 |
import json
|
3 |
import datasets
|
4 |
|
5 |
-
_VERSION = "1.1.1"
|
6 |
_TWEET_TEMPORAL_DESCRIPTION = """"""
|
7 |
_TWEET_TEMPORAL_CITATION = """"""
|
8 |
_TWEET_TOPIC_DESCRIPTION = """
|
@@ -232,6 +232,7 @@ class TweetTemporalShift(datasets.GeneratorBasedBuilder):
|
|
232 |
with open(dl_manager.download(url_map)) as f:
|
233 |
label_classes = f.readlines()
|
234 |
label_classes = [x.strip('\n') for x in label_classes]
|
|
|
235 |
features['gold_label'] = datasets.features.ClassLabel(names=label_classes)
|
236 |
elif "sentiment" in self.config.name:
|
237 |
features["text"] = datasets.Value("string")
|
|
|
2 |
import json
|
3 |
import datasets
|
4 |
|
5 |
+
_VERSION = "1.1.2"
|
6 |
_TWEET_TEMPORAL_DESCRIPTION = """"""
|
7 |
_TWEET_TEMPORAL_CITATION = """"""
|
8 |
_TWEET_TOPIC_DESCRIPTION = """
|
|
|
232 |
with open(dl_manager.download(url_map)) as f:
|
233 |
label_classes = f.readlines()
|
234 |
label_classes = [x.strip('\n') for x in label_classes]
|
235 |
+
label_classes = [x for n, x in enumerate(label_classes) if n != 68]
|
236 |
features['gold_label'] = datasets.features.ClassLabel(names=label_classes)
|
237 |
elif "sentiment" in self.config.name:
|
238 |
features["text"] = datasets.Value("string")
|