"""Error analysis of temporal vs. random splits.

For each task (hate speech, NERD, sentiment, NER) and each model, find test
examples that the model fine-tuned on the temporal split misclassifies while
the models fine-tuned on random splits classify correctly, then export the
examples ranked by how often this happens.
"""
import json
import os

import pandas as pd
from datasets import load_dataset

root_dir = "experiments/prediction_files"

# Topic classification label map (id -> name); kept for reference, not used below.
id_to_label = {
    '0': 'arts_&_culture',
    '1': 'business_&_entrepreneurs',
    '2': 'celebrity_&_pop_culture',
    '3': 'diaries_&_daily_life',
    '4': 'family',
    '5': 'fashion_&_style',
    '6': 'film_tv_&_video',
    '7': 'fitness_&_health',
    '8': 'food_&_dining',
    '9': 'gaming',
    '10': 'learning_&_educational',
    '11': 'music',
    '12': 'news_&_social_concern',
    '13': 'other_hobbies',
    '14': 'relationships',
    '15': 'science_&_technology',
    '16': 'sports',
    '17': 'travel_&_adventure',
    '18': 'youth_&_student_life'
}

tasks = ["nerd", "sentiment", "hate"]
splits = ["test_1", "test_2", "test_3", "test_4"]
model_list = [
    "roberta-base",
    "bertweet-base",
    "bernice",
    "roberta-large",
    "bertweet-large",
    "twitter-roberta-base-2019-90m",
    "twitter-roberta-base-dec2020",
    "twitter-roberta-base-2021-124m",
    "twitter-roberta-base-2022-154m",
    "twitter-roberta-large-2022-154m"
]
references = {}
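
# Cache the gold binary labels of each test split, keyed by task and split.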
for task in tasks:
    references[task] = {}
    for s in splits:
        data = load_dataset("tweettemposhift/tweet_temporal_shift", f"{task}_temporal", split=s)
        references[task][s] = [str(i) for i in data['gold_label_binary']]
os.makedirs("experiments/analysis", exist_ok=True)
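

# Hate speech detection: flag test examples that the temporal-split model
# misclassifies while the matching random-split model (same seed) classifies
# correctly, then aggregate the flags over seeds and models.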
output = {}
for model_m in model_list:
    # Per-example correctness of the temporal-split model over all four test splits.
    flags = []
    for s in splits:
        with open(f"{root_dir}/hate-hate_temporal-{model_m}/{s}.jsonl") as f:
            pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
        flags += [a == b for a, b in zip(references["hate"][s], pred)]
    count = {}
    for seed_s in range(3):
        # Per-example correctness of the random-split models (one per test split).
        flags_rand = []
        for random_r in range(4):
            with open(f"{root_dir}/hate-hate_random{random_r}_seed{seed_s}-{model_m}/test_{random_r + 1}.jsonl") as f:
                pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
            flags_rand += [a == b for a, b in zip(references["hate"][f"test_{random_r + 1}"], pred)]
        # True where the temporal model failed but the random-split model succeeded.
        count[f"{model_m}_{seed_s}"] = [not x and y for x, y in zip(flags, flags_rand)]
    # Per-example count of seeds (0-3) for which this is a temporal-only error.
    output[model_m] = pd.DataFrame(count).sum(1)

# Attach the error counts (summed over models) to the raw test data and export,
# most frequently misclassified examples first.
df_main = []
for s in splits:
    df_main.append(load_dataset("tweettemposhift/tweet_temporal_shift", "hate_temporal", split=s).to_pandas())
df_main = pd.concat(df_main)
df_main["error_count"] = pd.DataFrame(output).sum(1).values
df_main.sort_values("error_count", ascending=False).to_csv("experiments/analysis/hate.csv")
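

# NERD: the same temporal-vs-random error analysis for the NERD task predictions.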
output = {}
for model_m in model_list:
    flags = []
    for s in splits:
        with open(f"{root_dir}/nerd-nerd_temporal-{model_m}/{s}.jsonl") as f:
            pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
        flags += [a == b for a, b in zip(references["nerd"][s], pred)]
    count = {}
    for seed_s in range(3):
        flags_rand = []
        for random_r in range(4):
            with open(f"{root_dir}/nerd-nerd_random{random_r}_seed{seed_s}-{model_m}/test_{random_r + 1}.jsonl") as f:
                pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
            flags_rand += [a == b for a, b in zip(references["nerd"][f"test_{random_r + 1}"], pred)]
        count[f"{model_m}_{seed_s}"] = [not x and y for x, y in zip(flags, flags_rand)]
    output[model_m] = pd.DataFrame(count).sum(1)

df_main = []
for s in splits:
    df_main.append(load_dataset("tweettemposhift/tweet_temporal_shift", "nerd_temporal", split=s).to_pandas())
df_main = pd.concat(df_main)
df_main["error_count"] = pd.DataFrame(output).sum(1).values
df_main.sort_values("error_count", ascending=False).to_csv("experiments/analysis/nerd.csv")
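

# Sentiment analysis: same analysis, using the sentiment_small configurations.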
output = {}
for model_m in model_list:
    flags = []
    for s in splits:
        with open(f"{root_dir}/sentiment-sentiment_small_temporal-{model_m}/{s}.jsonl") as f:
            pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
        flags += [a == b for a, b in zip(references["sentiment"][s], pred)]
    count = {}
    for seed_s in range(3):
        flags_rand = []
        for random_r in range(4):
            with open(f"{root_dir}/sentiment-sentiment_small_random{random_r}_seed{seed_s}-{model_m}/test_{random_r + 1}.jsonl") as f:
                pred = [json.loads(i)["label"] for i in f.read().split('\n') if len(i)]
            flags_rand += [a == b for a, b in zip(references["sentiment"][f"test_{random_r + 1}"], pred)]
        count[f"{model_m}_{seed_s}"] = [not x and y for x, y in zip(flags, flags_rand)]
    output[model_m] = pd.DataFrame(count).sum(1)

df_main = []
for s in splits:
    df_main.append(load_dataset("tweettemposhift/tweet_temporal_shift", "sentiment_small_temporal", split=s).to_pandas())
df_main = pd.concat(df_main)
df_main["error_count"] = pd.DataFrame(output).sum(1).values
df_main.sort_values("error_count", ascending=False).to_csv("experiments/analysis/sentiment.csv")
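

# NER: token-level variant. An example counts as correct only when the full
# label sequence (positions masked with -100 removed) matches the predictions.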
output = {}
for model_m in model_list:
    flags = []
    for s in splits:
        with open(f"{root_dir}/ner-ner_temporal-{model_m}/{s}.jsonl") as f:
            tmp = [json.loads(i) for i in f.read().split('\n') if len(i)]
        # Drop ignored positions (-100) before comparing label/prediction sequences.
        label = [[x for x, y in zip(i["label"], i["prediction"]) if x != -100] for i in tmp]
        pred = [[y for x, y in zip(i["label"], i["prediction"]) if x != -100] for i in tmp]
        flags += [a == b for a, b in zip(label, pred)]
    count = {}
    for seed_s in range(3):
        flags_rand = []
        for random_r in range(4):
            with open(f"{root_dir}/ner-ner_random{random_r}_seed{seed_s}-{model_m}/test_{random_r + 1}.jsonl") as f:
                tmp = [json.loads(i) for i in f.read().split('\n') if len(i)]
            label = [[x for x, y in zip(i["label"], i["prediction"]) if x != -100] for i in tmp]
            pred = [[y for x, y in zip(i["label"], i["prediction"]) if x != -100] for i in tmp]
            flags_rand += [a == b for a, b in zip(label, pred)]
        count[f"{model_m}_{seed_s}"] = [not x and y for x, y in zip(flags, flags_rand)]
    output[model_m] = pd.DataFrame(count).sum(1)

df_main = []
for s in splits:
    df_main.append(load_dataset("tweettemposhift/tweet_temporal_shift", "ner_temporal", split=s).to_pandas())
df_main = pd.concat(df_main)
df_main["error_count"] = pd.DataFrame(output).sum(1).values
df_main.sort_values("error_count", ascending=False).to_csv("experiments/analysis/ner.csv")